~/f/dealii/RPMS.2017 ~/f/dealii ~/f/dealii RPMS.2017/deal_II-devel-9.5.1-1.1.x86_64.rpm RPMS/deal_II-devel-9.5.1-1.1.x86_64.rpm differ: byte 225, line 1 Comparing deal_II-devel-9.5.1-1.1.x86_64.rpm to deal_II-devel-9.5.1-1.1.x86_64.rpm comparing the rpm tags of deal_II-devel --- old-rpm-tags +++ new-rpm-tags @@ -10151 +10151 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html 3896a041fcf1a1bfb078364e25548604a7298b8c5e8a4b1c16395ccc2d5fb51e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html ed3b823b49614dcaa42b138e94a630468791f74e3b8f4b3b641c79bc81eeab03 2 @@ -10154,3 +10154,3 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html 3d6748d1073d70df612a7e71cb746663956b1caee6b21f8581d474c9e0de38c3 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex 2987d67faac7699d953885ecfb154d83cdc597d75cb5d8df8d18db3eba386fc0 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex a676293adaf9eb1679e827fcbcd2e3e6a4c62f62d7b157340076f627f9d64354 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html 08060bc5d2992461f8691b50d9c9109481f92a50e1c5ebabe2213aea0b8b8645 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex 453206fb2b7f14e22a41d3b99815b474ced1038854ef0208eff1228ea8d47ec3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex dbfa81338e0e3d9298857ab12eab7177222dbdbb447f521965a6b81c577b5e50 2 @@ -10322 +10322 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html d825a7150ffce4911e5c68b78d42ffd883d8b12d088b5fa5577ab627c8912654 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html f19f371027d617de067c3c3129a3975900d3d1019332ec44493549534163ecb5 2 @@ -10324 +10324 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html c264d9bdd47faed37199cda4d1bbe5eb62684b035f5abe4a3293ab55d1294168 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 
cee522617733f4282183ce64bafab9b20c65f268a28f68170d87446ad03673dd 2 @@ -10332 +10332 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html 6a8c023f55d4909a56b2703af48a8e046b8d2f98e8f417793590580e39c04acf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html a2ad6649d302abbc8a134f258797effc01d3ba19ec2e97c76ce8763747a03b54 2 @@ -10338 +10338 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_4_2_and_8_5_0.html 98a5913d5dfd97499db7e7a8a4556baa2a9bbdca9f5a20a804a836b1b83c5fe0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_4_2_and_8_5_0.html f198be15a7049e3972dc038240c43ae09b89768bba125504cfec32697dc684dd 2 @@ -10343 +10343 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 2684b453a60538c7cc13a4ce746f3c4051f44cd7e7abab1da76057ba2481e209 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html da521ff062eb567325740745c8b2a7e21edb7ae8b6ca6b88de424bf79fed0215 2 @@ -10370 +10370 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html fc79c02f77da2c018530bd7f8941a85c82ebb7121604811dc2014263cd6e1975 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 4c0bb1427f1eae15689594ba9b13fdd3a616feb437f714d3df749f544dd6548f 2 @@ -10388 +10388 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 397286bff13aeb55778717a8fb4a58bc3ca13a484c9fbed375791ef18a03ff0c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html c660debc728184dfee74e2872c24a1e55e6968c0561c6d074928ce074aeee6c9 2 @@ -10405 +10405 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 0484b361965f1dcb1161766475ef72df75c8c3b8089f1a51e9d2027eecc1fbd5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 495c00cc68809e88ed2f05cd2c4d9ab92dca1d26ead1fb59d3d3354b9b59086a 2 @@ -10442 
+10442 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html ff871ebdfa8676c7ca80b30189301b28c81d822e047d4393eb97a5f91a7bb0d9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 178557eafd0fc9a3c3ca0ff8fc29d0b9d163b378ea29bd3eeb4a8f37082a15eb 2 @@ -10445 +10445 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html 5a46dabe65810f8e28178695b7bbd22fc5244c67838fe1c61a5b361e4aca4484 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html dbeb9d3a3522c2aeeaff257ed97191a13205d3708dd91d5f205740939122ffcb 2 @@ -10448 +10448 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html 2e6ed7a6d2584f0deaed276ca94aecf725726e4718a06c7965364d15e4d70412 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html 57851191efd241f0c13ec402c06cdf4191ff844679bdadedffe219aeaaa0b029 2 @@ -10451 +10451 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 7e277f4f5cda85b0e606ef8b2bb8c21a4d8af0a037f19b9d2ec242ec74d19b28 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 057b676d7b3403066c057c28b0c2d28c8385ff9e0d2988e51cedec678c378007 2 @@ -10458 +10458 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html d1005fdf1dae7b3d8782209620dd6a1b1f8c2a952379c28ed219e0e32f0f1ab4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html a29dbe391d4454f2cbda4541fca020de00bb2a55394706a9b0cf0eeb3569b5b9 2 @@ -10464 +10464 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 0b5ac9f89083d35eb62fdec2aae08474ba9636098d38d8b2367f6271eb523df0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html ec246b0fcb00ae6999f25a8eb79e2c7c9023e737199eaa5f9a2facb4549f9832 2 @@ -10470 +10470 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html baff47e299adca7114490cf0424f8ab8b5dff6744dc5aebec7631db4488bed5c 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html b3d568d6c0f6055f5f6ac071f0b6896513c0e556ec510322b909a1c155932da5 2 @@ -10475 +10475 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html b0c56223a9226dea0813e17be79b5bc652c35e57916d77ee0093d36ed805fc85 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html a36fc4eaa76a7556c853d0c56ffcb24f12b2dc85a38c1ab43a7370e289effa9e 2 @@ -10488 +10488 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 878253318a97a6841fd9a3434b51e5396aac9971965e6ac8acec3d6aab8ddf0a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html d14ed9025876da3479fc9830d150a3e7c8ee25cc0174ebc190d99a13b58623e4 2 @@ -10490 +10490 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 9f2877b2743d409623472e63665d15bd880c9ef57cc3d5d9986000b1208f908c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 8e366f1958b960c5e20a566530a35c847a418398289063527a2f6b3dc9e23a50 2 @@ -10500 +10500 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html a61e7c3d25381a6922fee40f474a58b96c4508a825a250e9b47fcfae451db95f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html f7d6cfffc1360d5f62863144ef7fe068cfb501665b222c0fb64ec9716113c281 2 @@ -10502 +10502 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 07eb3af92ba4db82e2e56d938bf2b1463ff372186274e6d1cbc46beb1b51a9d9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html e00d469c25f070b3264a084801b7bb6d7f2b5e8d380cc6fe35e5ce79a648b022 2 @@ -10506 +10506 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 3e03c7bd79e7a4130dfc16776c4d36984da1276c69ea893d12fac028edd60ad0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 31a2dd1cf2e04543b42978889ddb2da01cd8ebe3b72ee4a5ddc5a71ebbc20669 2 @@ -10519 +10519 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html b44ba6ac7857e137928ab82391842b3bc562b44c2dffdb5625f62dd794f9e597 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html 8fb9ea9ed1a8caeed87a712b6db9157ef665f6bc7716856cd904ba5841bf6aac 2 @@ -10521 +10521 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html 8069bda5c49474c6e450dac51f804c8fc4e54851d7fa04ef022ae9fa87d08859 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html 4ffd10e0ad053209d62186a9ce0e7d3b60ff8b6d6f61576fa435f6317ba9b645 2 @@ -10525 +10525 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 1701e53b4614f0bfa4d5a2e2517cc62e25b60994fac78cfbf3eecbf405d771fd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html b8978329dde3d7957a0812f44cdb5991488373dc47cb4f55113bc4f939f7a4de 2 @@ -10528 +10528 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html f02bbd00cae94d2f20a607e30ed6135b1b7021b25291eac015f8d32cc4fc73f5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html 5b35488982b331eca3eb995b22ec9b642bef73ab895bfceeebf19c39e7ba9893 2 @@ -10538 +10538 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 107328a68846125c0398d1a859a59c8fd188b0298f8d38ba3df6ae7e5e8c40d5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 93b0552b8031fd09b7e3827de5b29316b0688ffbef89f188f792a4843cd56157 2 @@ -10541 +10541 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 2b27568bb432a76f78fc8db829d90b3f56f358329490fc80ab49b19e003cb869 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html c9f173b713311702d49db8e257e0f4f74aa2f276d874402cc6f53caa7f726ceb 2 @@ -10557 +10557 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 
ce0fba123fe751272ae83d0ab03501e4f9742f5319ee9b98099fd748d6c64eb2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 21c7c90f299094f8bf7adb54e22142af5d9baab28c8e0a7b3e5969ed9724d830 2 @@ -10570 +10570 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 6b3f801263547fc21894a6c73fd7556fa169bf3628abe0db4d86eba0c395548a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 4331a1b5aab48d7631402fbc2391e615c9fbf431bf9f88b8a88e38ee86abe144 2 @@ -10581 +10581 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classConvergenceTable.html cb1132bd223b5b043f4c8b8120eff1db43517f3c2579411172c0463e980af218 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classConvergenceTable.html dd877416492ec014b5017435383b831e6cff8f07568edb389ae6bd9e455d23c2 2 @@ -10584 +10584 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html be8494520bf21f051834c594ec842e0e3af5c5bf8904d87ad178f4fe8f79f69c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html a232c5c38b787241a304138c03a111a2b3afc4ea63e2709b431554a275d9dc08 2 @@ -10613 +10613 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 93049a38ac8347e63bb3e592353b37be551b51385a407811ef0e25e983e4665f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 66e721de8b3f7c954385a63bcdcd6f302ecb7085bd80009dde1c12004097a86f 2 @@ -10618 +10618 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 4469c6dac1ce369451bb4b027131645ec1ce54ed86b05de66243634830affa7a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 3ed2a5765f4cd5de8d77a2f98f718e5fa83a45259669ae25c41e5347c58a92a6 2 @@ -10621 +10621 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 5a3524d421b8ff159c7c0b92c02d428b73c94e26c8af9928790e8750ebe7db23 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 8927d1c2e7bccf1380964d40554567177d77b2ab1109ab3a251f4c33a60128b1 2 @@ -10638 +10638 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 3730f6cbc665b09d50640e15f42768d2b405d18d7029ccfcdbdfb574b1a760dd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html e7473f2a2fa7002d99bd7239d26196a5369725be6ce9b04cbfd57964f86d8684 2 @@ -10640 +10640 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 8c12df44288c5681520c457aafc7afa25117eaafe887d43303a866bfffdc2d2b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 7ce44820b9d7e54c6e23b80e4924240c7c82e0acd0d0e04368f89928a77c7eaf 2 @@ -10642 +10642 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html 970c3191951e14796bf8c48ec94ca2c15148e1dd29663f5fd80e2d935d3fba2f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html 3c67aa449fa60885be2f085a5b0a9e95c58e44a4414f72a4610e8f1f87426454 2 @@ -10648 +10648 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html d45e690ceef835bcf503b3ac7f3158303a3904c74823ef97f98d923899a08d7f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html d3230b8c1e235fd1ba0d574c70ca0b8b836759780aca4e1e99f94edcc7394d3f 2 @@ -10651 +10651 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html e9770a2ec463043764e104f9f92b82013d4f35a5dcc32557978ebced369cc2b7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 37d0b0c41af29c707c57f0b1cc2eac21c8b5bc7e3ac648bd6bb69dc3567f7fd7 2 @@ -10654 +10654 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html ad2689204029d72d9eaf53761d55fde8639dce1fc1819f2e5d86db3c8bca0b6f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html 7ef8e76c8a2d55d714811fbb0db386a6ac4d718c167a2d3936511be426013956 2 @@ -10657 +10657 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html 2600f93b27fb602f07f83ec887836ddb63f331dd3df95eb4e82a88c5baaf499f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html 9f1b596f83bfbe90fc07de6480eda66ef4b230358d689ac74821a684639bc499 2 @@ -10660 +10660 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html ae154d2b27c7e0fac3aead5c75519d05c1db2541e10ded409a1084cbd2e7005e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html e5c248b2b36bb10ba7f01941f82732ebc2b2210ea2580247dbc0f90c47a1f010 2 @@ -10663 +10663 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html 648842100f218a1aa1e79126e5256d8864e50485129b3e5902d6c13ef493c933 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html 56c1daf3f9e1c519b0cab8917787c63db79baa64a0cb55ccfcff9ba561d36963 2 @@ -10666 +10666 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html 82a0724adc8bbb8bd86c9da95efef4c175acecaa8be722f1f5949836d2a349fa 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html e38353bad36f2bb7fd9619776b2d4bdf0987917094b713c63ac1066ba5f14c45 2 @@ -10678 +10678 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html 4c99ec19b6c759ab74672aa00ad75c54f5cfe1f5ec629b9eaacff0e564a8b2b1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html 
303eb8f2bf80b63cef9f03b3ff2a581b41940ae03b7d75657513ca673f382ef0 2 @@ -10689 +10689 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html a6c87b7a79ead3e52da1a3455cc65a0e3ee3cb5c52891d7882c1898008a78ed7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html d0213c1cc50a81dad3cd2462f7c77a8e92337a237e436061f7f7cb9b3220861b 2 @@ -10700 +10700 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 2f4d9f165fe26f10fc7df65fc76500ab38dccae0311cdf10f5bc00ec31bf459d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 8b488f09ede50b8ef4df22c0a51e302b233a32bf7207e3e9c5180d19a18e5ac8 2 @@ -10707 +10707 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html da46028e7b4507d02b34ec284ccce8e6308e313a9db8b5c58405a709872289f9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html 7dd18f015db6595207d266654129f0dd7a1664c4488693c0161495f024ca998c 2 @@ -10710 +10710 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html a48c886ad05668216f1ee4c1b22147bca9dd79b5dc9dd1a2355f3128058050d1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html e4875927a3fc990e1583a8ee7c35619ae076f426a592e9ddaa556993c8b15727 2 @@ -10713 +10713 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 7642085d08bfe1072bd94929e59c2f5caa66e441416188a65393592b95ab2610 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 7b3a4f2bdc05c83245984f062319f5f10b75beb4fa52d3d2a50a49e4991eaca5 2 @@ -10721 +10721 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 5956b43383304c1c74f954e019fabc7acb6cca861020181eebed52d45d5aaaf9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html d883ba77c8ea259a274ba77f37d6756228b54b42e5efb1f3415315b13393fc03 2 @@ -10723 +10723 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess.html 
fb17230e17f9f8d1b87e4e350677a5c3573c64d1f0a234d41bdb1050f7cfa5a0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess.html 1af9e7b7a708be13ad15fced4a165e14b106d8e73d697fca0e311c545cf0a7fc 2 @@ -10725 +10725 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html fe61746d2478b4dc33e86c62728df4086691eca1d1497014997c329ff9c1254c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 5cdcde7db1ac44327158d3bf8fcb4575d0a4b18ea5421c0186a0efcc8acb52f0 2 @@ -10728 +10728 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 05f50a147d58f5477a8bf59a07edf0abc0e023f5fdc0989f6f401ba9f8ebbfed 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html bc62ed4943d401c0c916b787c237195d262c72a25eb60512b1d39200397c4e5d 2 @@ -10731 +10731 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 602673af90cd8ac5a72e0f806efed828fa0b68353fd13200172b23fc8cffdb53 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 02a221b63b2bd70d44ac15f43e330cc7eb231057762f061d7e771d754b64bfe3 2 @@ -10735 +10735 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html 8460e9b1c49bdb794ceeb453f659bf843a65bd4476a4195279ce5f2311e1eff2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html e93fae5e3e044233d227b62a40811d4dff0458ec836dcbd5e685b692f1591faa 2 @@ -10738 +10738 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 
a2602c7bb83706f47c7abee94c3a6da218f3adaf33c93e865c1fe032ee19b406 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 81ffa6996ae845bb9f7aa01538a5804d32706d0afe0cdbfe22ccb58d570332f2 2 @@ -10742 +10742 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 6fd36ffc915c6d5177511a20d01a1a32a38f87195087db843a7fb28976a86870 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 6a57ec5a9b351dbd551905ca893aedcadc4d4dc3bebea0933a5663bdf7825ccc 2 @@ -10745 +10745 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html f5b471f13bf41e7743cf1d7cde319ad6b32e7efd7f1ad6262a2d23c5c3c47ec2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 2770e0bc76c3ca15da013b2a2f9c83526c0af8f60df8b9d08753c844d6bea725 2 @@ -10747 +10747 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html c817b7d6723631f62fc490ee460be73489ed15a17fb219bd2a0b00b673c11327 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html ad31d161c592ddbda530135c4f50d46f72f5a60bc07d29a283b6b6e4462c7e1c 2 @@ -10751 +10751 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 18095f5f3ee25837031939d1413b755ef3e1cf7205feb2471434f5f33ac56951 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 2bd3e8d3083817ce60cf84e7fa5787ede4bd279517c9883b67d8c70521d3a58a 2 @@ -10757 +10757 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 5aecdd5fe3ab25013ecbf6d794eb8e3e88a85406c428dd8cc9c19aa7f0cdc1ad 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html b23aab8c3e698d38eebf6029101556b930e383432fef22fc8a2471778b2cdd3c 2 @@ -10760 +10760 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 0ebd834fc2f352922ec68c6ccdb00e2ca2c7641df73bfbef0b2eb7cbc0833631 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 
ce0effa32b0549597c5a850f726447290a565604e20f64ba44625a409a366ae0 2 @@ -10765 +10765 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 490db44d02976c4025d83292277e8ba03298b27d456352ff4f79293174409e1a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 3e76830070d605c6fe076e2454af536784a815e99024240fa47c5a89c7c4d0ab 2 @@ -10768 +10768 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 7aeeb0ad364e2ac0a47f553651a55741df736ea6c12978f59ae0ee7d345b642e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 686a560f17bcc88bc18ecd7d257c4437644622c31e75cb9f8fe327aafa2a95da 2 @@ -10771 +10771 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 3623b8f25f69f1beea89db3b8eb33009c7472f6579f8c2db9745dd77683b07f8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 5342a47b0a833c1d3fb73c76def26f48194612c3e79f54a79d4c88e762004954 2 @@ -10774 +10774 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html bcbcb1d8b288402006ce149b10d317f63019bf6f3d048043ced2e6f56624e91f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 9fd5758ed1a6598c93253c43bbb9c7441f284ef4a83c5deb75c5e49d1a6aa732 2 @@ -10786 +10786 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html a073b7b7dde1fa6f96cead74b0522785c269d33bf6b073757c6986d82e52933c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 65bdd368ae9975085687dd7815960cfbf4946ff3c7c89452926e227e584aa010 2 @@ -10788 +10788 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 6d0252e90be669d222b48512b464398424c2d4557515a166822b1059c7ef979f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html f93d8934b9ce0b7e49c9162d15fcaa275e8cc6aac59c51f0a308442b60fbc5bf 2 @@ -10793 +10793 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 
442c2f7f25ee9ec75392b35385b410a1d951cfbd835b50c1123a7ea4653e5274 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 82547322dd26ff93879f43b4550ff54aa24b926113ca49974fe8ad13677cfceb 2 @@ -10797 +10797 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 21b9071d328117a55e8bcae8c96f72ef4b64932329717a78fef0cefc7997cd71 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 915495254f2f87a7f2ee61631e691107835b68f939a726e8b41bbfc5dbcc3786 2 @@ -10800 +10800 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html a99c50c2b73b4a24a43820a2a1a973c53b6d903e8eecfceb45ff71b5cb9b0850 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html af923e01bf27951cc8f4f5039b0a4bc9c93d09db9115a6ec0a4b763a62d35139 2 @@ -10802 +10802 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 71f433fb9a85088a83065fb22da5c912425f3073b1ff7d5e3dbf27fe8722fdb9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 2eaa8ea6817a27b3a84fc2ed96d586c8c5106f76abefee7a19f056c2057e882a 2 @@ -10806 +10806 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 059c9239081de1dc76d04c945bd77b9eb096bf03ff1ed594f3e6d6d8af915e39 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 0ea25c70a70da4a14f692896a66e776c82adedda64a3e9d553fffc66c8f11d5b 2 @@ -10812 +10812 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 0a3b168e5f615eb4234e7fb9be6594cdcc45f2eb4b756f5e87e54cde9c4ce1b5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 5ed34098f1034d09ea8cfb520dc50de8a377e7fd91d8bf2ebeda9c1d8cda9dfe 2 @@ -10815 +10815 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 
40e9b04068063035ec1d7e8492be6d4df0de64009333e4f7a5ed138b1d6d2884 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 90c114cea2666f1091df22f077cfcde7e3b37f72ff5d77c33ca4744fe8f2bc77 2 @@ -10818 +10818 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html d2456e6d4db6837a16e87401935df5ee56520154f9f5fec8054eec699c0ac098 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html cc7281b18dbe31502da0c8398c20275284afd457c8391208948c636b30fd20fe 2 @@ -10821 +10821 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 212597817210794901f70b59e80d5cd57876972a5dade29938143b7de7880869 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 37f8a0e5396841b0b3eda2c2b6797eb857007c1179c8422ab466f63415748973 2 @@ -10824 +10824 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html eed0979680cc5bc1db43f1c3a32882e1bfe21b7d600cc31d94ca4f7bfbc296e0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html c7070fa52951602ea70a177c8f82809d04957a22e95ae7a75a2f2e3e8fad13ec 2 @@ -10827 +10827 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html e93e171c9c097bd6ab11c4a493f8fec55251da9b7374ec2a754b0c5d96767157 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html dee7c5f11980087e7c0db870504f2fcdffc7687b823db8a1ec6b64f31faf6267 2 @@ -10829 +10829 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 112381bb61b0dd4d24b354b5e692b9c9e29aae94678bf6e80a24235f538c2ce8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html b793acd1fb98553832703af72afdf501db03b9e6431c9cde116a2be95eaae33f 2 @@ -10832 +10832 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 42fc10e7d8f074f091e103507d209f3df5b97ac51535f390c67e22fb606daac8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html c51e687c2d2005eb826fb143618deaccac2fc1078916ffb6ba06b418a81d40a1 2 @@ 
-10836 +10836 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html f5db30c938a5069362fd6a61504179d31be8a779480eeb886c0d45ba22ec4363 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html d43f6cd297cec5b709e13d9b572ffb9d69c2f31a0f1e29eaa784eac6463d0714 2 @@ -10838 +10838 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html 807ce83f8d80d271246ba1f90c19bf45a4a5524c1605f0afd795a09ba34cd6fd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html f9a2415556be9a0bbda9d47f54889d9c0a20911d97a9838ee8981c7f1a05b2c3 2 @@ -10841 +10841 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 47bf8b4a3e321d2ace7797948700b3ade63001825037fcaf83d9efc26a6a9f80 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 88f31420bce0c1b35a2e9603d3fcb9e921c7ece3ea3f88e99b63f8197a41a659 2 @@ -10844 +10844 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html f5ed5b1b62fec5076096698a50312d5cd486fa4f56657bd53f633a3685095035 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html d7350b582f0b50e1bc53c9500559329837e420e878331a25595164509d12a16f 2 @@ -10848 +10848 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 2838d3be421a77f94731f3784354f6eb36795eefe22c8e0c970fbc267bc6b573 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 076ead5390f184c31419677fe9845c7fc42fb85c3aec0b2ef32a92485fcf1445 2 @@ -10851 +10851 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html b68db9989d9678967288e1b932d0df9fd7469ad30601f12cb510e72ec17fc597 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 89703d8976d3e6a161de5d30a9cb1a6abe05ab39bae7f86aed909924740afc80 2 @@ -10857 +10857 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html acc766a4e5108164e79523edbdab6e1d0321aa636c3e44b551c5ca5d19981ce2 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 79a93d3a889de790a077e8960ead2b3066867c9f305f593e26d9ffbe195909e4 2 @@ -10863 +10863 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 053b38f7e9df4a36778af66b8755b8a57caa02be2be522745212ff32c50c5907 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 395bf73b4431b721c62bbb57dac91f4a45ea9b3eb427308597ff354872c12ece 2 @@ -10865 +10865 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html b48d49d9acfdb999fd1bd7b769559beab2d81c408db159c4f6d04868ae5f6162 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 394c2e389bf99f374a51d32527434fd153309ddc3d30115bfde7fefb7cc0ed74 2 @@ -10869 +10869 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html d4c7ec7ac1222f1c81261856faab0242741f87f68ef203e3d0fa09629e4ba2b8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 0583094f725ab096c42e29b18b49e206975559599a9228db1f19b5e51e76658a 2 @@ -10871 +10871 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 955458749cea988b765f829209d3e6cc61ab2651123fb54593570db22eaf4b5e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 35ea6d30642a8111c328d4268cf1be26b6909d68edc497a6f33e26a922367783 2 @@ -10875 +10875 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 7f9474888f096dc98b8ef85563abe6556dc7132cc5675d500de67ac3d5411f11 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 0165e73babc4394e7a9122380496a463e037a429115f84fa325328b6227fb942 2 @@ -10877 +10877 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 8c1dacbe7adac6ce2e2942f55779728bf906764bd11ea4392ec23ea18eee9988 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 32d60c448c7b1112ebda62b424f0f1bedf07f3b371fcb887033020f00ce0a15b 2 @@ -10879 
+10879 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html cf4e51cf49726db8117020efdd4d6e2fb3f443d4f53033467fbb98adfad48823 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html ffe31a83ebbf37a9a2ab0548c71ce3b5a43a233d0af42441cc09e8c0eb6f35f0 2 @@ -10884 +10884 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html ebdac7b3a179e72722c6c1afff46d4d42a03ece67bd4a9500af23b4619a69861 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 44c6b9b5ef18612e74e07f590d988f7a2b1418218378dcc88b7ebe13a6f124e3 2 @@ -10888 +10888 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 5d6a0f1a434c1f2f1cdff6a91cf1211f1aaf4da0b8371a62c1896f801546d120 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html ba8b23ed193a65c1ff5ad79e72b2c8f4b808610878272f5e2689f14ade348cdd 2 @@ -10891 +10891 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html beb302765af85a56900ea552d643b4b3ecefa34616ea07ebfb0380f906b6cf60 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 159a6c01bee1d78c0968ca61eb4b9e82fb8c9e43bb4b5d7de7affa3baf70f58e 2 @@ -10893 +10893 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html 8947b129c5ce83bb1c877d7913cb2b893d08a37f054e4d763954f861ecd7d715 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html e5e8fbf0393983e7cf6049ee832a16c82224fb4cd5762c9a0cfe75a94d78a486 2 @@ -10899 +10899 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html d6366a1b591af74c738c2ecbc5ff4dc33c3f6c63ae531013b595088a6a80414f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 5fde64386d4ebc51e3dd674e425be6f9e453a56501f8a184f41871dc21a95f28 2 @@ -10909 +10909 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 5bedaebb3a79ec123c9f11987c2b21679e7a7e69ea5d719f049a53927e59d04c 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 45cf57fea27d19230c07ed68d2f6957d0a6322a94ab5fb2484906a6c35e51c2c 2 @@ -10912 +10912 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html be88d5d84de48e176490078dc6e2921a237ab389be8ece256a79d31c9a2f5cf3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html 85c50eb12e0b4937b202e682c37f1e959962b39f523158b8fb5131254b01eac0 2 @@ -10915 +10915 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 0727fe6becbcae52222617783a100672d61ad14887761b1d9eac9afe4c8edcaf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 75be030829ef8743ade5ff1dae98d32df025e4f055796737b71097bb77f37b23 2 @@ -10918 +10918 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 33b421cce72c8cff7ce17b3b7cbf631f1decfdc3da987891c9f545d0ed12e04b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 33af5c1eb03b16adc4e48b28fb6e308c2b98a5a90eabc903f48cf155fda819b7 2 @@ -10920 +10920 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 330edd712a4541b7ed6e278080a59b3bf5a1b664b252ab24e98b9b3490938e80 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html bf9d48e7a9664ed3faaefb0b28af22cddf4f051deca5e4766cea944c35eccd3b 2 @@ -10923 +10923 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html 137e2fdfc3c7af7fe1c26ba94f0facd921405291b13ecac9f043679aa18edf14 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html d420a2287861125958c3c7d7d5706fb11882b0a7cf59c6409b521db754f026f5 2 @@ -10926 +10926 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 07dcc70962f34a8cf54920f3e3f2fdaef80e60cc2b3848fa6d92b60c5fccfa23 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 39aa196064a33cc8bb337691563375b37f383bf0070131e47f69c89654aa2d27 2 @@ -10929 +10929 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 69e8c78b7e3505998a4ff4182b8215258aa10eb09ecc3d94fbbe983da5a15e6b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 7363b28ff5dcedceb545aaf7034101f644e3eaba8821a0b52fd0090fdfc331f2 2 @@ -10933 +10933 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 46302e9a22e544bd1017f25daf7f201a97cd4ac7836804ad5bc5a974373cebfd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 1c1ee3871b883e2e16b2c869ac07ca1a83624c3878a23fb79a411ef07c49bd62 2 @@ -10936 +10936 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html dbd3cf9fe517b80542f7fe697e5e0cfc17d9ab470471f08e208a0b8c1d1e441a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html 6f3f9096ee0928bc95672fbd7bbf467dc7bc67c0a91a447bf9b5779c8e68cb8c 2 @@ -10939 +10939 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html 4c025e2e233de17c9e69cdc1eccaad85f0594cb41ed71cdc93ae9ece73d2f75b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html ed02cd5bfb62be5e813ac358e96db179a435cb309f89110c656c19e8851bd500 2 @@ -10942 +10942 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 572873d86102082eb4e26279956365a82beddee34c9317c861dea0f9deebe126 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html c2d6f2fa436ec516a3dc81313f751ca6ad99c1f1b144689a956ed82eaa4effda 2 @@ -10944 +10944 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 61490ab8b5bf8eff0684510590c6bc83afa5143ec239b36d45dadf0b1e9c4451 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 10a591453460a0fd3e8d2703963ad8e67a942b664306c0be1ae485add9d769a3 2 @@ -10948 +10948 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html d068f17d862476d0610cc26b1537c65ba1303e981ed91022f3e4c5c9ad4ff075 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html b58cd41514346bf38f8c4458b76506df533ccd8d8873b1d6d464583fde4e2c3e 2 @@ -10951 +10951 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html f063b030489bffbbeaa64743051d172dd1ff19885cf4136e1f632076035ee5e8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html ce0aaf1d41cb1537462ad7f06a1369829bfb7030fefcb8244ef87bdb2a58b033 2 @@ -10953 +10953 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 672701a6e7e10e8bb6a131e44168534babfe029ce4175eddaf1203ff76d7d53e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 4373362b7be3971c36a79be72824978831eef4c7c65bdf4d9efab42bccb75646 2 @@ -10957 +10957 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html f09cb088ac2f9d8047f58ee4f46b33f71a56d65ccd91454ec860524662390984 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 01ee0123bf494dc7ac3f84df3f92551d66bd3884c5e1274fcdc061dde3a50d12 2 @@ -10960 +10960 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 2ff20c7e8ac58dbcdab7fcc74e70f68ce1b1abf834b34c005e644b11b78609f1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 4006dd0b714af3baa111ea4834caec5910abe3dcce87b74c7d1342cfb39f5091 2 @@ -10962 +10962 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html bd1719b29689166540197e2562a7a5ff74b6e5bd141b5346a6f0f87a47fcec80 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html fa4a18f2f0e9065d3db8ec4db33d77ca7763c5b30c884189e2e163c61c15ead5 2 @@ -10966 +10966 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html 62181ab8c6455c42553420dd39a5276d71937851baebcd17f6ffeaa6e3f83324 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html b226e4fba867acbfa9db4e7a85f56edcb3eb74853cf8b53f1d5711f9d0f5951b 2 @@ -10969 +10969 
@@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 7c152003cd9de58716d7d68d8cddc7ac87432bcbbc7de0f15a172253160d5169 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 71fe44269c9b3304092a8adca2b45c647b9b24dc10c3bed308148bb6f1bfe6b6 2 @@ -10972 +10972 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html 0aa8196da21e21d2d11d1848554e1d05c8dd2dfbd984ca6f6aa58db071ea986f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html bbf67635a15152ecd42cd7c5b997d1ec2f2f2364a6a92cc91ada8249a74a4a32 2 @@ -10984 +10984 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 90545b4450288602c1563cbeceea32fe717aa0b95390f18cd859abe4328c3634 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 2ff74a0ec9a18b381d74d82163fb745e939ebd7b64fca1f2047bea568e80c039 2 @@ -10993 +10993 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html 38c37108758627bf3cc48d60ca4881df2fc818215b284eed31043e6d1db1875c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html 8ca2f12f8ff2e34f17bbb0db1ea52dd1bebe75e4dc8b303c1677302f159a046b 2 @@ -10995 +10995 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html 23d628a448c534e79ec4211eb63ecda2b1aa4f8a007887aaf60b61c8d440fee1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html 11bf1f5a3aff685845be604eb51f36c30354f1fc2f7be34cb0149ba835e2a640 2 @@ -10998 +10998 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 602e9c14406f2bb5405af0f3d6df528f4b088f7570ee6bfc77ccee9a537ddc3c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html f0035b4235941591bf053ef0c7cca33d4a2a7efb922f74d9363095733790c62c 2 @@ -11002 +11002 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html 115edd4704a0564cf7c35c3c1a0d51b845d61f841bc306bfb31396963b9fe744 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html 
ee3bd47c205cec6376413dc77f914b1c50304424fffaa52d0e6b7558fb651d9f 2 @@ -11004 +11004 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html 269bbcad6f4f667f06c144ed1e1df76599f6c08437fda88e2a0f75c23e2d5ac0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html d7358744004ca8daf1ad383453ab58d4c634e17b92e32652caa6e16d7b22adfe 2 @@ -11010 +11010 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html d6707737e939153a1c2070df7b64641aa7390ac986548f411da3df60069e7639 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html c37d2145053db9748e93b1ebc9dc53207f84fa963519f25abb5c84db6d1af2d9 2 @@ -11013 +11013 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html b282dde52fa3d309ce67787365a572eeb37be50c689b600bed553ef8ed65e3b6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html 90d81d1ec7a3681465333e94f696b7781d635faec890b945e2725bc8022892bd 2 @@ -11029 +11029 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html 2ce80308b7d1f7c02fde1fbab613ccdc2d46a9d427904ab4609ef67ba305faa1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html d60781dc81246e8469b82eb21c8580f60cb7c5438eefaec16523501ebac7fb69 2 @@ -11044 +11044 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionCinfty.html 4b188f9c3c4dd85fa23bed95c40d57ca9db5b8166aba1992a79056e61ef5dc6b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionCinfty.html 7180101e64309e5191b2cb3583f9300caae178df30307c63d5301145d54e3c7a 2 @@ -11083 +11083 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 371970f644252f3442acb8171503b63abbc76db8644f9eb4a543beca626cc167 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 
33f2eef41917dc30944c0cda7abf479c07f1ec6a320ab079adfdc3b93ef9c2aa 2 @@ -11086 +11086 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html aab91beab62c9332db5f2d2e58796d88be29e7c072d084afef1e1994b6771421 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html 99d3dde663c36c8911b89dbfed85550e13550b43e8244ae28c37b5e92bcb8cf5 2 @@ -11095 +11095 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 69f5d4e622e3ca4170c1b49b154c8e8016aefec9c8629c5d586989a55a693740 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 0630184abee05b8ae165d4b4b2d999090fa222b233dcc6fe28e72e975d5bbc98 2 @@ -11104 +11104 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html e4b8def8b719b6148931df3c835b79347e2d6cbcb02aa7a89fd75408bda35fbf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html 02c9d39d6a3dac82b671d1241f21134826bd6a368f00999746086e4d0d4261d5 2 @@ -11110 +11110 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html c711ad78e2c5904e6879deafa2c0a7317430b3915d144efacae39e381c8e1880 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html 11f62a536945842bcb38ebde20dfa4aa9188323a410e38c9cd6a7b9364aa40d4 2 @@ -11116 +11116 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 0f206c7ecce8e370a450896cc34796c91d48a0a379e736372e2f45a18978ce3b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 1a9e6d39f26804b7e19304331b0ccfb268b4fea98df094e7059d0fcf425a268e 2 @@ -11122 +11122 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html 51b87e36cfb4d290b10d2d29d6efcd003218007486d3619b2392d8a55da5dbbf 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html 33f9c4f90bc16a9595ff1e0cff72ce96d518820b99e7f9b73e69efac0bf9858e 2 @@ -11125 +11125 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 6b2310e6f8d61cd81a97b6fdd4eee9fff9f3dfa48b572b573e9a06a4acf37264 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 7c42c1d3db1a1c35910eaf634266dae1623547c188b65069915fe9cf6c1ec792 2 @@ -11128 +11128 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html d33f35983d47badcba49881c1e74ce931fbc3bf85c50c9e25383312aba146599 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html 0068b90d8a381980e9d209e235a608dd0ed3d6880735a62cd7fe2ed7aa04647b 2 @@ -11131 +11131 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html 85efa304190bb896d16c55466c8000a3b3770739e19749c2e1db8ded2e614533 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html 6a254823f2b05682ec33a4ee527361f3509c3ff6cda85b472a58b8565bc7a23d 2 @@ -11134 +11134 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html e411236f8fb9723a6a12a3239b086e100982b1ac0f07b292628bfe30cb570a55 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html 0c58045f25be6525fd63c87ae2211c85659d8abc1c9d88a3ea9795e7db21e8bb 2 @@ -11143 +11143 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Spherical.html 7f34221c537e06e061db328c6fcb8048bc0bdf8e2aa3c021052c6781959667fc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Spherical.html 7e6558c888c517f23ab752004734ca245b6dd5a9fd8694a9d3bc40e31141cb71 2 @@ -11152 +11152 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html 
a9851f157ebf75b137398046f4e44b920cd9fb035fccf1f8451bc92ae71b4904 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html fb57ff6465787a8907d7914ab93a0c6f13c365ac6050eedd257a219092af15f6 2 @@ -11189 +11189 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html 8429a9d093c1e5063ae2a1926d144b7246e05df1582a5ca5eeb77b445f57672d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html 8c8f47b10e28071c945ca3130d18331cb4053fafe36998e9d148087ba0660fa1 2 @@ -11229 +11229 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html 8be7707b7c539c7edcc9168cdbd646a918408fc97c1dc39f3b3b434966e48462 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html c94e69340df3222f9b0b18f85a52fb8149ea8f1abeecee9daae0d995afff6779 2 @@ -11232 +11232 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html 6f229ca1cc7ee80337e69023bceb166aed3df736b1674644201076097a2bb817 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html fb0a1ae4c0a125d31ca1f5b5b386f0f8d55cbaeccaeac8720ef1639df9e93b65 2 @@ -11234 +11234 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html a55f1b835a1f95767f7425d40888fd548282b28f9505f5b67c26dd3cf7a1704d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html 7cee836837e0bf829db7fab1070fc3a3fb115664cc02ce04d208a367582a9bac 2 @@ -11237 +11237 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html d5b90121632f40358336d355b57d5ef91da7936b9ee1fcd26408a1beb392e93d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 0766344a489285054c9364c46b1c2ca6242b86712c31226adb6d692016f69d8c 2 @@ -11247 +11247 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html d3b42d893df1c00c2b82f1e07d6e93baca7bc9ba55458a35d3a80204f008107c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html 6a647cbb415621040afbcdf1cab9f9279efd19573c165b294006a144e8e37773 2 @@ 
-11289 +11289 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator.html 8950ee3f75b110e57ddb75850661cf843b1e1977fc3dbf62b7e088a52edf1996 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator.html 65a619ad5abf86c3c44dd7cd9f7d6e97afff65a0e588ee84d83cd956bcaf752d 2 @@ -11302 +11302 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html 62d4b121d458c1cc7b50ec3e178581f9210d3c216e29191487db1398e79e8ea8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html d030eeb11e87a48e165fa55134e5766866ef1a4ceaa40d95d36da11b4fb4c913 2 @@ -11306 +11306 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html bd48ce161537b9f4679f3f9995012728539c719d7b672f52bf587d4f9647bdae 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 321f38e49fa5f585555a0f6ab54c10be07cfa35f1a1c56eedc04096d4ae50d44 2 @@ -11315 +11315 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 214ff678e6cba04b513d1b2fef6d54bf388d9ec3c41fcc6726467ff273f6de49 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html f96411403e0228bc25b391f948ab07a3706795fbee86f0d8b2b18f96a6720523 2 @@ -11332 +11332 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 8013e2bf0342794add0f07e5fbe72a112d69ff36db60dea556fa8d3bd0287a23 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 750362d867d42d88bb307002f71d34148e3928f164bc898e36c7c54dbb70c75a 2 @@ -11335 +11335 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html c10f878c168e70aff1feff677fcbc3872c11d8a359673628c8eb86cfb88aa564 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 
719e0c69c89f646c170f12d3722295a51b34e08cddb98cba4f3f0c0cc2d952e3 2 @@ -11341 +11341 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html 6226e14848f72f5a1152481128194df93ece0c35cd98b60e6b31e7a54b7152ba 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html 16194620b610792d609cc82ad41bfafbcde9741b9ef6db8c2b8ade1c47da6d82 2 @@ -11459 +11459 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html d600bdf3d10900c4ccf8935fff382cab0b1b29f84c47070170f50cdf0410aa15 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html 5ddc062b21b4849da0c117eaaf4dd2d253ed285c4b6be2e881a76383f7f7a2e4 2 @@ -11462 +11462 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 5fbc28205bac16834c970eceec7990aa66374742df34b6e6fbb85a67357d892f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html f0ba1e9b4d12c206244b68869a522f624a98325761ded3a256f0676f3ae857bd 2 @@ -11464 +11464 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html 1bb72417480e86fc610e1362d607ea8ceceda5984b6208e19f5a1b1f06dfb38f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html bfe82e98a413077e64d31b47345240d05aa2f23a34c3f8b4beed62d32a5eb811 2 @@ -11467 +11467 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html df8561676f0d08ee4c26f312c160d22edef2f4642a3c087bb9dba491ebcfd02c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html 0ee9a8031c3838f6d2da65963597c739f69d40ee56ac9561e4c10bb4e4c4236f 2 @@ -11473 +11473 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html 17a873dbc2200152c1d751b1bcb77231e5cea399c217ebbd1fa691b3fda34e1e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html db3c406e50434cf0395a5b9312169254300b31437b106b7092adaf04024efcb2 2 @@ -11475 +11475 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html 11d43b7c003c7fc7a11d016a0ee6c341c0d0006badac9ea6c05641c200d52d07 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html 0fac9fd035fe095a9a59d35458f0f9cad231f32b17b2678496a2bef0f52fa63e 2 @@ -11477 +11477 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html e8c724198dd35c89e0657d098d76a36eb76f77607ebff0f4627cc615ec22eb77 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html 2ac22cc65706cee7b8cbfe7d8abbccc551f874bb11cecfc0700e6d2877dd6b27 2 @@ -11481 +11481 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html 0cec4bb94eedec3896ec2fe0487bea3c844bdcf84fd4b15040c723f18c5ecead 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html 2a68198486fa8bc5db365e00f6551fc468c5909483f4adae249db11fc72bfa82 2 @@ -11485 +11485 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html 74e1cc5285e069768642031681f5029b1b5a5801ea65601be4562821b8af4a77 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html 6a1b38fb0621cf35fc78fc3c0d1673809c4f2014e26224d41fd12ed5b5acb706 2 @@ -11487 +11487 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html b704d1c60f8bc8ecb2133af3e6506fcaec0e254be8d3526bc39f7191c4f585dc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html c68a2a96ef537023f79ae1ac5c13a2c19eda3a9dff9f6a93ae8cd41561787405 2 @@ -11492 +11492 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html aabd3517c55d7c5b7a6a6de9e0763964e241ea217b13791e2f28afbf03ed444e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html bc6bb07cbfc50f47d36a0c39f61a14e1584b3c5f3cdce83f38656a486890e4a4 2 @@ -11494 +11494 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html 110fb8bfba47287ec9ea8ac5881259052f0828d8baab5c4e6f24bc31c5b544de 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html 
12149c302b8ffd9b26b61faf7662760550f5dea848e79beacddc9232657bd0fc 2 @@ -11496 +11496 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html a28e97054d80a700c7bbfe331cd533ed85a162f4b1754d2ccbd9bb052f5c8bf3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html 349855921c23d87b70805f25de4f036f07086f688edbf2d83efebc64f7328f54 2 @@ -11500 +11500 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html 35f075861d22bceb3bd0b0f4aac0c270b63751b4a9d733b3c6f20b079ad6205f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html bf722b0b15acae9152973486d06877456ae7a2a9af73186390770a07862a3619 2 @@ -11503 +11503 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html 45e47ac5ea7e96ab0213548c4d76bde5a398b86b19dfa7542b92fbe5cd7dedc1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html 33f435de2636822f7ccb56f4122ab2c4ef13cc3721ad87dcebf8c34f0e3390c4 2 @@ -11509 +11509 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html 398f04a3aa021d8bc3a9dde1e2ef9f43295f3a2edd9c4a93bf1b4b5782a41a5d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html 3397206de4b9760a2091fddf3825a819287c9220b364d6fc370c328202aa5138 2 @@ -11531 +11531 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMatrixFreeOperators_1_1LaplaceOperator.html e7950d418551951ed4857e70d82294a791327ac290ec698ca51c82115ca8b6e1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMatrixFreeOperators_1_1LaplaceOperator.html 84fb0ffbb9fda2cc064abc250e17589f734c3ca7f8c4e41a607e16ee69ce4e72 2 @@ -11576 +11576 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1Assembler_1_1ResidualLocalBlocksToGlobalBlocks.html 5a4904c6c7168c1bd854a313146bf8d1192687720cd43ad8d75306312ad743ea 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1Assembler_1_1ResidualLocalBlocksToGlobalBlocks.html 
5fe6931d5ae84087fd98df94489adba8e17a19e8b2ffa6c59c965cba43e8d7a2 2 @@ -11586 +11586 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1DoFInfoBox.html 84fbaff632b0ec81e1cc0f65e5846ffe3e7917061d533470eda3506327bb95c3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1DoFInfoBox.html 716ceb502842e815700a2518845586f4664d28f5749ad25fb05f5df991a04d9c 2 @@ -11631 +11631 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html 1ccd31a7e5ff961183c0dee1804ceeae26fd94e820091ee04e53ff8d5e26f14c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html 346cc5c8c502767a074a03d46b9af49a49a5c9414600669ee4c563c92f673bec 2 @@ -11634 +11634 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html 6113ef29fedc8fc1acb977346db1812e299600b53c6e4620184267408212de2e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html f5ee005a811cdeaf9b2b4c07046eb79872d13ef085d29af322ba2de3dc4d65b6 2 @@ -11637 +11637 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 0f6be8d2bc08d82b3864d3ad6e974292002022da037a37c1b312e4a00ea4bf4c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 481f85175493fd3d61c90f161b7979dd0b310eac9a97a2a4042046ca2f8ec755 2 @@ -11640 +11640 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html 4ebeb3e31f9b283d6afdc1ba5c1bea1992dbb116bcd3d7d79df28ced0a12c319 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html d81028e937910bc5727fd780672953d1f922ac589756b989f8e6fdab2911abe4 2 @@ -11642 +11642 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 986c4c17709a7267b525b06475364bbb90b4c786553a1831d036620a323f3652 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 2f8a2129fb7b0aeb98eac02a4a0edf66ad13e11109f67678ec6fb3dcb7c84198 2 @@ -11644 +11644 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 264f6bb8a617934d54befb70048bc768586775318f225fffa0c677712c869f34 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 6f74bd876546850632231ab70a9b5f208cf29aeb4fc1b768a4c5ea11710b39b4 2 @@ -11646 +11646 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 026869b33c1258482f058c565130472fa175ab2e0c2377baf0bca4fb817634f1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 9c4bb2719c5f397a81b210b2ca2666c5be84518e391476684fdaeed138f9cc4e 2 @@ -11649 +11649 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html adee8d34b46ef9ceecc0a4b445ade4863e07fb5864be92d0cd380c47b03300ae 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html f229dc282f2a1960ae87800d0d43ce8c7471fd1341f6c09ce3e46bd9437ec733 2 @@ -11658 +11658 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 201a919b99f9a61915cf9d6526fda761f677f72b889b26fbd4d20468b19deff1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html c7778649a98cf3fead454afa9345dc6c599f9603d03a0135203a2e6111fd3b97 2 @@ -11666 +11666 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html bf9c264a8faa0fd5100750f4fb975c98d30575d358426cb7de0b05544cf52191 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 
65410a3dca364e8045d8bbb8cd9151945ea82c9016de55fe9758ed0d17c7fdce 2 @@ -11681 +11681 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 1ddefe984b484cab9e60a3d863a795b3db4c58c952ac503e1226c3bf45a0fe11 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 62d727e513e552da0e43b8fe569b0a3b076410c05994d899dbfff9dfa5e8df46 2 @@ -11686 +11686 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 3cda92e47325e3bd3f26ed053e0ea8f60ea95d6c4b6e54385db58758dbec01ff 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 916e7ea5293d237b6657b70c7806423ee4c2c32a9cd89de8aff18fddea6175d2 2 @@ -11690 +11690 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 227b67856f873a71899e70c1db20c23ffefbeb1b236fe075861f36055da53a63 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html e7ca97628bd9c826d9be6565381be102ef9dd73583c5f14ef512b53fbc8160c2 2 @@ -11692 +11692 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 3578a2efc39e4dbdd080458d6c4c2ca4b5b7ce212567f812058c2279c902af1b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 7f28efed3788ee4dd470596d421e708535a05e7d6f4c6bff3b0b3ec8afb57d3a 2 @@ -11694 +11694 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 
9386cf22d43255aadf57e92a2d1baa498630177050567e6011cfd171fa4fa9f2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html fcc578014c98146f6ee0117f9a7712c0393104abedc1d9063f62b9e6724dd481 2 @@ -11696 +11696 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html c457004a355fb24c45e76b1429f163c8be2c60fccb9d5208d26ff72bb3e28ceb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 5d6794eaa0e4d34859b2fc8ec4f13d322ac415c1543e5c0e0dbb271894e63860 2 @@ -11698 +11698 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html c745f64cda6758167c2904d1737dce3c85cf5a97ef8fcbbf5f021a5531142980 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 99a01f4e6d62b21d5985237b15b775bd5020fcd4db79e377cf524f1cfb333ae6 2 @@ -11700 +11700 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html e95705db87f649397e4be5f02ac73774e82bbf4caae747fcab6899ae4cdfc608 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html fb393de211c703b0c311e4ef91b65c69f15820cc2d883ffb958209bc43a890ac 2 @@ -11703 +11703 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html fff83d2cf598a537e5d449636034f14301b55785538e6c57ce87c71f1ddd4188 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html de08e66da9b574152b4e35c48928c37b4d4c7826090e2bb0af451d4504e80f81 2 @@ -11706 +11706 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html a97a67b9fa8d89ab6f0bdc3d78840fe5d08b75b5c3f081ca1df1291752194e92 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 
6527696b065940e4ae5fe18066f85343798741acbee33f7aa635a3e9e159d8b7 2 @@ -11709 +11709 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html 9748054ac61e25a0b55bedf5ebb8a29d34c0bd4f7c81a2264963152e70f74f02 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html dc2e4e2bc870f1fd2777e1f637ee74d1a793c5c9f002cee45cc732f2c7babfdf 2 @@ -11712 +11712 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 193a34b78d91487322f9f48bbae2f9e1d43811bc24fa71762e3fe9be84249304 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 1c9c437b99a75e0099f07859f00627b2e0291d87e3eefd307929e052a751c8e4 2 @@ -11715 +11715 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html 6c537fa05afea999839149f9627aa9f1735bf373c4806e738186989f878d5d41 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html a04deee944ba10b1f2b57fea8c839336bcc022fb02f525a97414016ec1c2f552 2 @@ -11718 +11718 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html c8ca4df1aa00cc0fc15e3930589457fe8a27f818b39916d53c82eb5711d83a6a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 6d8d5c3addae99b9b1c96bdd27473e3157d70be56460add98aa5daaa17acaa88 2 @@ -11721 +11721 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html c386e2c3bf4d003b8df314b7fbf0456ff41b7c695f57132bc04fea99355dc8f8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 168fb3868873f378d66a31d4f17ccce950d0ef77a16dd7067f5f7903b0833b4d 2 @@ -11724 +11724 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 1dd1a9056b7e77fc479b19800ecdc3cfccb172721162af0fb504d9b8d8b87983 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html b07415c1cf6c29e02a2ac6593f486f59cd619dde18630659b1523a13cc742ad2 2 @@ -11727 +11727 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html b1ed0bc1f4f571bc401d95041d0840dd0babaace923d9a5a5bc4c6672da0d450 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html fbce0cfb56a144f80073072d74cc511192348a381263ab4cd9e041c100a54cee 2 @@ -11730 +11730 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 1f7375090e2fba62da7dae2b24a6c8032b1976a11084584a92ec058628e7ae70 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html c3c461390d804ce96507153d588e8ee6e2a7eab81b60fa17cf2a3a19a28535cb 2 @@ -11733 +11733 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html fc1ca1c029e5a88488fc8697827633fae42121e8168604d6553f63ca5df47513 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 5af586d93dc187c1c0c4c673985ddff608d4eec5c2056ee9a6b2daa0d4b4e020 2 @@ -11736 +11736 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 87e2e9a3140bc8fef666a229e7b3a54999564d41c601f6b6742cd0cb3bc2e9eb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 62777ae6aaf16d68ba18e2245f65e6f9622d387d27b9792e2c12e59a170e516b 2 @@ -11739 +11739 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 13a21d8b265e378e57478dcde6ddccd18b70c5f84b051fc3fae3be4305093767 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 8d4a2956201b35627cde5c4aaa7bb687d64a8f2cf211d153468206ebe7f879fa 2 @@ -11746 +11746 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 
bb90414602e8d0e0278396a7b2ed1940e81aabb371b7a42efb9d040bf5539c13 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html c11ad176b0bc7b5c74fc33ba7acb116661c1336a5b15af24394f6a7d4a5a72d0 2 @@ -11834 +11834 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html cf5dc70df663b680440369ec741815f18570befa022ce1eda7b2cd16243a1164 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html c415eeca8a3004a472cfab9d8d0a8da3d1342b3f8992500bdad1de68de90f2fe 2 @@ -11837 +11837 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 4ec317bb484ef8c67b47e592156aaaf5e54119b4f8fd43827131cab7952ee030 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 0cdc70384050b17fa9f6d08be04ec4486512103b7b77e884bc31e22d6b68be8f 2 @@ -11841 +11841 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 70a1cf3e293128a3f8a2c1f767b1c533927c5e1795e2a5a8e7e84adc4d1a9d22 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 3496791a2df3d8dfef1ab7888419fa963d6b6591deec80c2be903132f39382ae 2 @@ -11846 +11846 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPackagedOperation.html e963085a5a65b7eda2dce277f2e7fe10c02c5c6bb2163c1992fb937c829789f2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPackagedOperation.html 79882520177adc4154fcaa365ea83a9aad1f4f9636d5259f419b5569718e1971 2 @@ -11867 +11867 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html 68bccfd8a1061ce93104a13dc2edf58c96285791b62680ddad6480a547c4ddc7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html 09f491a9560da85c2233f7069ae2e1bba6baa74cde01a203bf2b751235e61c4b 2 @@ -11872 +11872 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html 
e87dae7ca398c27843d37edc6ef5cedfc371673fbabef2797aa7bb0f93a5fe11 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html fd367c7121af8b0692c99a6b00c4ea27337afcf387c02eb6f9ef98a9f07f1e55 2 @@ -11913 +11913 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html 3485eafcd3158bb4f6fad3b229aa40364d679c9763024d15baf455fea36d4f09 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html ddd738b786e9d01f9c442a83cb848098cfb030bdb90ba365535593f37f0dfdc0 2 @@ -11916 +11916 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPhysics_1_1Elasticity_1_1StandardTensors.html 9a09d648c25a2e4d40837e090f07923ec45e2117a70ad99413e7aa786993ee80 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPhysics_1_1Elasticity_1_1StandardTensors.html a0ecee9003f35b4db3188976e4bc6a551b3178cf4c2938e41c5bc94a94cf2f3b 2 @@ -11918 +11918 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html c260b8247b423d1ea13e1777a5f102d45532309033faadfce0f40f92fb2497c9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html fe15a8a3675e50293f4cb02fa358769e82b5dd34cbc53c6248b76dfcbdd8cb9c 2 @@ -11923 +11923 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html a20703d6937063081c57c8251a8d596866a424653e4955da0655709e330c96ac 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html 8938d88efbde4611945057a1b71213f868bfa58eebcf70903bd9d0ddbe93248b 2 @@ -11938 +11938 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html a8cfddbb576e685ead4a8ecc193861153ede3cbbf4903e96cdf6a1dd83dae7a0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html dac2687b57219049778298bf9e3746d7d3a124dfcd24e21743f8fee57ccc712c 2 @@ -11941 +11941 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html afa864aaf0f5caccb6cd1a37fcebb68a29fa71162ebebbec60126e746eef7062 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html 3dc5f27026dc50bf13083b6b4bf761579a709095db24a123eb9d22783f0f5c52 2 @@ -11959 +11959 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 31e813fa9cd030ca39846342b3da020290ea5aefd5045c632c609070aa20e78e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 1c45573804aa4c495bcd58c9a5700fb52cbcd90366fbbd3c5174c609950c6a8d 2 @@ -11962 +11962 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 53e64e0f0aa0094de90bfd7c049a24fdf35a0450c717f73efcbee68962a195f7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 7acc020bcb1b7490f0014c84dc81b317db483b381f79821691625c5c75407e4c 2 @@ -11965 +11965 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html cb8a74f4cc6df4fdfddd49dedd592cbf66c08522433f610dd08ceb1b4a5add2d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 3847ebccdaf21a75a797508d9c63dfd7521796ce409d50d13161b26b8a430250 2 @@ -11968 +11968 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 3e9e4a98a9e51fea4a15dddd89eab5f4a973cde5bd4f20e906f26825b2df3d33 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html f421ff497736cec954cc0db88dfbc86f1c8826c6c4b7fb3f7f265018057f3d3c 2 @@ -11971 +11971 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html b5fa53f776924f2b467a77d94dd0ec0c2c6138fb9a89512074bbac1570d2095e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html 72338823e586127a3d078be2f4eadc5cc3578eac1b2471af6e0222d0c0f9306b 2 @@ -11974 +11974 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html d0d81a193b08c289d4f4dd38f75fb4020d5cee4152c44471a3c447d42168f3cc 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html d8c748c1ea7f3afac7b9294aa4fd97e1da503ce508f24cc26c60e4a045b9068a 2 @@ -11977 +11977 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html ec4af7a4d470a46267a2f8dc1716e08bcd7e4bff9acc4d83fed77f8c09ce7e2a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html 2d3d9656237685f6229850eb2c344bf1ae9079105b9cd06c54b68af59bb1a850 2 @@ -11983 +11983 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html b37151aca7cf400dfed10b9936e9ba5de4298fdfd77347a4e5401c185cdd30d5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html e9f84fe029f053ea502cb6ff1590c8752f0335b49deb8c8a4e6873b62d4293e6 2 @@ -11986 +11986 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 3e00fd214ddbae667c2b85ce4cab0f4ac11da195288263a8f1fe581b94f1d874 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html a1282f31b327e0561717b621eb1b48cd30acc6268239ae398af92690359e6be1 2 @@ -12063 +12063 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html 1737d006b30efc3b50d301e7b941a8e139850f01db06d0b1eb47a5e581f8c52c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html efda8bb72ad79e6cf57ab33669ee4244eff414f31d5497c3778cad16ccc2353e 2 @@ -12066 +12066 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html c6682e2ed134e2a500af86f586b5890df68e2ddacfc6700df3f4bb4fbc09c2df 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html 62bfe0d5571a5e3aa9bb97e40b0a85c860cc72c1a5216d8e80b0bacc0c3cee7e 2 @@ -12068 +12068 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html cb7abfce8aa9763d8f25cb3007e24f64105b643bf1192f562d02b1a837bd3954 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html 
45af40b87c5dacce27933ebcd782bb607d2efa5ccd16be36b1e848cb574f0ba2 2 @@ -12072 +12072 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 1ee6899dc58ec06134ade83f4dad54e4ebbc5e3792191bf2859a889e4136dd49 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html ddeee83d9bf73bf0777e71e1872ca576284622b4e9310fc19295383bf1455dd4 2 @@ -12074 +12074 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html 79bd2be192d4f3f7120927fefa254469b6a06bc7ad90f1fd236f23062f29209f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html 1de0bacfa1771245804b86fb60e5672abfb89c9e58459f66bf2ef2320b8857fb 2 @@ -12078 +12078 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 200bba11317b7f93e892938d17029c65781ce97b9f7bbb6ddbf15fa9b34456ca 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 9f7f8e9698ed787bca548674f3d22f3e8d5c78a79a47eaade6920748eb9f1dec 2 @@ -12084 +12084 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html 3206dc3a2d7c3dfccd80a566a5d12bbf022c7039161f2528933c3abdbe2499d5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html b8d7174337177d529e912240458c5ea2fa6d21fb24fec8fa1b9d035ae76ab9b3 2 @@ -12110 +12110 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 544fc68b720d9db489777d42a527bf4006047e399ebc1d987bd4e780d28f7fd6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html fb5a9179c956848f32ace54942aa21bc2a7442c0133d697253c337c70b1600c2 2 @@ -12125 +12125 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 719c56a8ae7567814bee8018c13842715cfa1d4dfb7a5b09026505b022c84025 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html c62b678f19a37b4eb91c39a929a0e7a8a9e4fa94e7f4f39970d1f64f6a372d50 2 @@ -12137 +12137 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html 0280db262ec51e18966d64cfd88224348665160cb941ab48d24ae2e91cb5eb68 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html b017a6961a6122d1c1ddc7526e2b83095e738b7030770bf9ba7e24a1217e9730 2 @@ -12140 +12140 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html 9ae030cf861f293d08027a7790f1c3d36b226ac7530cf02291d511b667269c89 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html a405c5b08b5005b854770fd1e6a87670a40d82f076e4e5ceb49abff293e2f276 2 @@ -12151 +12151 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html 0956cce8d9705cfe2dd0b1126cfa2dfc9902b75cba4ca5437836fbe5b6b9a7e6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html d02864516699652a181f328eb9d9e8044d2eba3b6b221a6c8518e160037435f2 2 @@ -12229 +12229 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 883ce60cb052a7e6ea770a43d1a61bda431b338fa8e29ba93232161daeb98e01 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 191671896957b7218ca34a7e92a29b96629dfc53e07c5f8265f72d5821e4c45f 2 @@ -12233 +12233 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html fb858df227abb6c317ad472693a8e4a626ac4ce1b8b9a18975dd83051238d826 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html df75e397594bebcf0d22f3ac6fd108055a72858279ca2ad537a8ab88f3833aae 2 @@ -12235 +12235 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html a491b9e1c504847ccdcd1379eb102730749ceb66c4e182579fee06a4e5a07dc4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 89c1cfba9fc7c48552038abdc56c272e923d6b2a9210339216a14e24a57d74a4 2 @@ -12237 +12237 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 0565b9c22d85463a2e83a269a8c24526002d1f135275784149c2ac958f71713b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 9e83a79254fcf8c4b340f6e212b6168ea9e417965d047cb7a70c40509661c9a0 2 @@ 
-12248 +12248 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 9434b55e7a040d5fd4903e3e1255c45bfb7f7341b3a402a97463319e415bd0d5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 90d4b185bf888b85211d9dd4fd1f0834231e5f7a4af55f2bd1506737625f94e4 2 @@ -12251 +12251 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 751f5eabc939707e7d67b23f5e97d0ee0af05955137ddc88c246867c6e93b0fe 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 6344e95ae8cd5d292f8a0150b9cc81e4e0ba2207332265ff70d6c7bdf30732a7 2 @@ -12269 +12269 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html b11a1960d253ddc58ac2e28b12968cd760628559d1507599ae44372b046b314e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 4479d5b020c25ff407c732db11ab209e6694b00f8793cda0c46b95e1cdeff65c 2 @@ -12278 +12278 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverCG.html ce0ad5521072625eaffa9fbbc04683375be9316f1b57339e9d5f34f53e08c5a5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverCG.html 156756111d1a9b0475e6da281c6c3314107437dd1afec3fcfb6f94e60d4feaa3 2 @@ -12290 +12290 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html ebd928e99053ccaf81c7dcd389b9a341f3915bbabdf9ce5f37251d3f42c4da02 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html f386e63760d2a2a85b9dd3999e0efb9d42c6d139ad77eb5720cd649b26fc3e6c 2 @@ -12293 +12293 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html 82e185b774f84073e6bc85fa24f2c3f8795012bd9f9b8b54fed0e5f8b42eafa1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html 62ba3f00b2373c17e0edf7b93bda054d24627e63ef4b0f35c92e69f48f100f4d 2 @@ -12311 +12311 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 2f24dfce8cb1fac3fba09705d01fdbe6f8785ec889410e631d4342be0c5984bd 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 83e908a1e1c88cc16c7a8d5f4b404f8776b8a47aa889a720038ce85fbedc03cc 2 @@ -12323 +12323 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html 72db06ecee0917d4af8c5b6c66fe0bc1a21e9a54a849f05e9155b5c337b1abc6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html a5544592fcb9ce7d365962ad9ecf97f96205f13bc3a646553a0060ad686aaf06 2 @@ -12327 +12327 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html df1b39d5e4982589e5da655a9b288b62b384ca92c761f00fa351268d4359e488 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html edf4678b760565bec9f2703c64b8a753081299e58c0f6d80dccb59a12477f17b 2 @@ -12330 +12330 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 02398191fac8643d13ee0b1684bff586b04f4209481dcfc26070987977f1942b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 42b07a9608c77f69dba7e9ae0e7c49255a11d2966016bafd499e40505a7d4811 2 @@ -12335 +12335 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html 9546d100d949e6f7d9a578a49405d6ab2b31cec78d805316865db88943dc92d5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html fb66884d65525b9e70aef4d12e04b248c7810af71893f2c9d172d0c2a29414a4 2 @@ -12338 +12338 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 960e3699e463b10838e4cc24839778399d8dc9686201ad0020cd9bddf9057012 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 243202678585777fd7625ac13faa98cae1d3a4a8d4acb5c5cdc033d1d639155a 2 @@ -12340 +12340 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 7e60ce4650b795fcc8c4e14096df10e2716a74689640007aa93d93bf91c59ce4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 59bf7def87a936937382dfd346d1c675c7ecac093067d9fb5bed2ab5bee98037 2 @@ -12358 +12358 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html e63ef58c90b46ec916b1701b4cdb7689b46daeacfd5ef4d6613fde6468aee5e8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html e6bcdd90d8e25ce88ba3621af8560800744f72275608e944fb6c83a59b6665cd 2 @@ -12366 +12366 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 8a15eb3e6e8038e60e6aeb34808e1ab757efdb0bae0d2a6c68d9cf11ae1deba7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 7ae9138d3df50ec16591104e4b46363322b24603c461f47fe3ab18322ec90354 2 @@ -12374 +12374 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 2a0be46f3181c9067dfd789794e1c857df482abf2efed64ec803aa3a26f21234 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html e17fdc68759bc34782993eb1870f65f8a4cc85d915b74265ed12026df5cc499c 2 @@ -12378 +12378 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html b7d3689c8c311407e0c1e940856625bdf200f5bfc6e4ccb13a0981568fd091cb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 13440622948a45f0bb719d71b036c312984c3f29fa9613796c2cdf7021963080 2 @@ -12386 +12386 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 24c3af7e7bd4357921d82b1e8200c73dd7f22d62df453406feec1c556d560697 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html c148a5de962a31f757787bbe80e28f67df1d20383d032873b86657de02e1d45d 2 @@ -12390 +12390 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html d8dca06bb263efd7ecb98a2e599b94cde9d3548fd84fc8f64181ca0d240c4c3e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 276c884a9b7ff0c9a5ccec41cd867f38b85c5dd25ff0aa84b22f329c8ef88de0 2 @@ -12393 +12393 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html 
909cf1635cdd1837260912fba5ab491a3e490fb839f505142569615edf6d253a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html 9372a95c25861dbf085ea58481c6f83eb73ec658177b01ae31abeeb038d6116f 2 @@ -12421 +12421 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html 75a62593a8051ea4df47f09a7d7cbe8c6909278f8451476553c72bdf11a8cf1f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html 1bab0c28fd86feed5dd9dae417ab6d80b47039e69a68f7ed712678295aba6d8f 2 @@ -12460 +12460 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 8b6b8e357c8aa5f2d8d31ca5dfb07c7c5926ace9a0680f458c4f2b6526b1eedf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html e1697da1d5321bf765af8db66ef9a06a6cd4c8bfe131aabe8a9fecefeaf7a9d1 2 @@ -12463 +12463 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 5c2cf9715a9aa0222fedf5699181269737817700d6063daa66db6c04f6f78416 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 8b6dda83523c71c75f1fd6ff078b7cf31ae9c423ea3c049bc98e9d52fad9a355 2 @@ -12469 +12469 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductPolynomialsBubbles.html 614501182cfe6d0bf8cb84715a442780674d44443b580404bf1c3bc4b49b0c95 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductPolynomialsBubbles.html bbe7c006403d87926472497caf271f3bca66ec31b8dec79f7217cd695e73476f 2 @@ -12540 +12540 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html 30ffac37889a6ee9276d29c83de8088f9d19d8f273c93043f1f37ed497cc58a4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html f377912b9ebd66c471d24f3ad2f4600fa6472de46922bd06b827e19ae6fa6854 2 @@ -12545 +12545 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 2bc75d4d4bdfea40d05101851434e7c8fef4bf987e72cf9c1a4581ae0609ac49 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 7addc4727ad85feea4640fb5ff0bafb540172f3bb0f9b58109fa3ce2f7b03e8b 2 @@ -12551 +12551 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html 54fc14ec0584ee0ac492a3b596d12b6c7515c8be7a7c674e907b7b6dacabb171 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html cd17b94bc46ec9239dafca158eff6c9824a6cdf1774b8aca1b2d7f796b604c6e 2 @@ -12556 +12556 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html b0f9f5e4c91c4263043e984fee5e7fa8bcffb8bb3be33a6b13d6acdbcd839eac 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html 85b87355ce9eecabceb084eb0f45371eb95e7baa0803f81a52e91ca5c6c2b011 2 @@ -12559 +12559 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html e73d6dfc4f3bff59f8ed0d156b459d44d2376701738d1f4bc5ff998bf047873d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 20f44b2f19e51b38d1415a179793c138c1f410671bb04caa82aa4bb9b6667126 2 @@ -12571 +12571 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html fd6410839ad9d728aeb26540bbe51fdcd038aa2db2069bed24c673036e7cc348 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html f7b7fc7b5b19a14a0847f84fcd3b14b78aefe360845cca40b00e2108fc14a7a2 2 @@ -12578 +12578 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html a2ddf4978be9cbb44e38043c4d90490d37a427263e69a44a166819c7b016f120 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html ed61ad2738b2e397d0b8fdf87cd73e80eb1c23b562ce382832b48ef989616ed6 2 @@ -12584 +12584 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html 
93730f26793a972aadc49eb9599cb2f1207e9daee99f0619d34b51eee310e673 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html e3ad24afbb37c326c3f928184366bffeec23694e48ad113a50006732f8706d63 2 @@ -12587 +12587 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 7e9530097f44f5a48e92c9ef046fa66b7cf18e32aef229a218c79e9da7d4adb9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 44bb2f3e936913576edf14d4d369f9ef9c68d654ab5b71749c947b12e54a22b0 2 @@ -12590 +12590 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 74bfaa915cb784c49f81b37ec5fd1dc57f1ffe944f4e51ac5d577462100bf08d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 8f946dee3c3d103a515ca300894d241c17dae0e611d9a0e2ea79f2cb7e928327 2 @@ -12658 +12658 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 72112263dac6f4743b18128618ed5354955841439f2312813650d4622651307e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 1542b3382914419eacfd1697289b777fb2f4e0bbfde536c03c5f0ad46a118922 2 @@ -12677 +12677 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html cc0ce26dc5d206cce3b4a99dc1cad0ec316dcf954495c6abd96a1ffff4ad8ce5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html e456c9b3b2582e968a69a4eca678ed24af84b28d79420e0f685cef4e626ffdd6 2 @@ -12726 +12726 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 75e6e1865b34e9d521f7991d67247f3e0570e888bee27ab445d53f05e4cfee69 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html a98f805e3ab09a7071be1db5bff532b918caa5f0ddae529ea824d1e519978df9 2 @@ -12729 +12729 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 0f53f4e762c3bdce173c3bad4ff4248a2c4f47a3d0c678a4b8adf83f12a20e08 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html a4a21f40bb7939f50bfcac13c5a738cebf07c96cbbc11cd8db4b36e58d1cf48a 2 @@ -12741 +12741 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html df090b126322c358a2a2f14ada718e37b5e51ebd1dc8575bee7592bbbcbfae3a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 5d4818ef9401d87664487284b52e2685ba8c3cf388d8f26d93a8b022da0a6d5a 2 @@ -12810 +12810 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html b2cfece9f306ef26a6d9e2fd1493982ab74050fb74bca7fc9a6017f7affbf262 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html 255ea33900ede4e6b3c79a9b0486610332076fdc82277013f8064dea6adbf498 2 @@ -12932 +12932 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 557abed3d0c03bc8c15291a71e112c3e6bfb57a98984cc783f8f40690de69461 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html fb653d56ff7ff22a7bb15d6ab8b67294526e46af1899d38eb5c46e1d1519de03 2 @@ -13005 +13005 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 20cbbbaa6b477ef4ea0227ef01021e8257be25fdd80d3b5b46a346183520113b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 7cf337d165eae4f42aa94bdadbea36ae86c3dfd4a21e4277f1a73979cbb94c20 2 @@ -13010 +13010 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 6aad91063ba3ff738634e63dfe2181af3911c1e8c91e558274ffc09c0094b5ba 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 3fb2940f87c46407abb9ae5f88f3001f270046d5b7a9bb23e4e60e3e56eba305 2 @@ 
-13023 +13023 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html a3e586f6ed044019d6df5abe08aec87f24b6bfbd0c638942397f8abd48438a14 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 2627acd8b9e29ae7a801642ebed59214025b01ec83079d0a3b7957e42d101fb5 2 @@ -13025 +13025 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 66d110ff4cae014bca7d8e44285b5e57f899ae9d8f0566d63058ce76a05c7444 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html b0e626aadc2fe4c4166df1434fc9f6f217cc219039e249510c9da73f8156cda6 2 @@ -13032 +13032 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 006fb5611f4aa86359e7f0ce47a17f753994c140e8ed2c84bb4e4248938cef46 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html f41f4a9b5890ba23179a9e6a1034534543b32237f10cfa61a0a7d9e8beb0fa39 2 @@ -13037 +13037 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 832cf8558c8d2299387bbb46990511f9993dea9aa9f6698815d0c18ba7d76269 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 1e0b528377120a86f68f4648c76464cb8248db374c24b6be339db14fe82e06d6 2 @@ -13213 +13213 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 2b45305b035db6878e633fbd47523f5438dcb9e79945f7139a556ebc88a60cab 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 9014639ad20a5fe9a555e7d19a31a3af1b37855e550e89abc85e9aac442ca253 2 @@ -13218 +13218 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 71e81765a98024dfa149a60ca145dc7a9fca66b68f2733fe3ce81959173e47d2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 
584a2df8d714a4995e25d1574e5b717d581e98113f60ddd7c8488c511894818c 2 @@ -13681 +13681 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html a576bb6996cdd1185e110656261845c41f6de67a5a766592a4afa4163b43cb4a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html a87a6ea7fdefac7d048f853d4baba7a53b5da9829a16c0cc594da9676033d31e 2 @@ -14351 +14351 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/grid__tools_8h_source.html 164582b635f4f06f12dd4b57e9cb22fba441098102ea8f89ba68904e8efb5ed4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/grid__tools_8h_source.html 47f601766695ca7452c5af37ad5daaa5b6ef332fa44538972073c50c5733590e 2 @@ -14366 +14366 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 47e4e103b4449352634293bc2cfe6acc59f3d6bdf6253210ea21a0595115782b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html c89a7abcd95f68b857cf43849a4abb6c5f6d9d1e6523bf93df30406cd99470ab 2 @@ -14369 +14369 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 2cca334bddf268b8f62e60d4f0cef91f744f84f7180a43073e52e14e6f96cc28 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 0db63910a29b8fbf33df55ac9b131254e95897cabddb7338410c888a8e8a7055 2 @@ -14371 +14371 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__Exceptions.html 67a67de01b198bad16e249d6e27560a43591edbc74726929a1ae0c57f5dfd187 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__Exceptions.html c4a5664e41f3d85d4d8fe30c4e9e309555c9d910e262877c26f0af25ad80a77d 2 @@ -14383 +14383 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 3ed8a5bc8fa10c59d9cb6a5d01b3b274dd94ccc8543bcb37d45d911105bc265a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html fdd8fa21e36cf931facba2cc44ad52746e1327a79bdd0c1c9c1bcb35abec0d97 2 @@ -14411 +14411 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html 61a24342e263df221a15f94207a276d9b5e22f6a6b95d4e979d36dbaf8617daf 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html b5dc8102a277089c67aa18cc7d7166bac0c90990504b44fe8ccc622bf9010af1 2 @@ -14416 +14416 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 720fb844ad6a3676d54ac2b5bf5fed1dea9d02daedced1cc0c83a01c95202093 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 5535a54a323a9a51f44c1fd3adb925a2234ab055bd9dadb308d5fa57982dba69 2 @@ -14418 +14418 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html e0bd74a0e8d80549090f56f4427b96e9d6f81d0e3457cd30bed906a5c3d134fc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html 093b8bfc6e050280fcdc92615d2b4b6a94d07a6f6f311302f1a39ebc5c89c060 2 @@ -14428 +14428 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__feaccess.html beb69c29dce3ff961b88d107d1b4909d61a412eeab483ed6fccf352ab512766b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__feaccess.html 1f587589da5a12586afebe8112cbca43d96f0b024ba329a0f8d9dfd068fcd95b 2 @@ -14442 +14442 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html 933e985c61ef84af1ff0ce73eabe5c7c96854d47aadfd899eef9f3d774f8cdae 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html 1f72f1cf397bdb97fc4cfedfd88410678843fc69d06b4a4f041e1407e3b1df80 2 @@ -14446 +14446 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__manifold.html 77197961b99c36ffb3686a18ce19590fbcd0d8e26213189f3d82a0f6656130ff 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__manifold.html 7d78481e8e7587e5c2e9d041214cbfe828e84c73640202b3c4de9d6acd2cd3a3 2 @@ -14448 +14448 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html 940bbd1eaba1dbe5ebc53936fec97ffa4662db95a449534016d3beabbbbfe403 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html e9beaed29ba53587348f65ed7e884ea8efb39cc8554d0addaf04b285a911a2fd 2 @@ -14459 +14459 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html 1c62c958ab09cee8fbb999e1a3d49029d663496c1c6de1f4cec2a61480a60531 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html d1c1d718f4a88573815abbebc9aecbbb752ec5575c6f9559b7cd3d67a6102d27 2 @@ -14464 +14464 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html 68bda27584b0ac107f220c46f32da6c525594303e2acc2a58bb05aa41492f481 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html abadb726f5de5c5c0598274e89a7c59a980f3c8df0027be12af8930ab36c031d 2 @@ -14468 +14468 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html e66e9c8063fce10b5e917af372c037e3f9dc0b9d3c59487e6612074d088c1ab3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html 0a305245f37f4f26bf1984376a696b7b146a9242007bc7e0db9fcd945554fbe8 2 @@ -14561 +14561 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/index.html 41313e1943d3fad12b5d2744be5a68adefc4ba856e82d100cc6e48b82504183e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/index.html a1069dd00c28558b1ccd41777a623434f791e8b1e0fb028d18adb4c14639308f 2 @@ -14564 +14564 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html 28296ff5b3901efe295c249671ecc5ca422a1fcd9bf86d8f2b1fa5d1acaf0101 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html f7b3700a69ecfb2da0dd19de5ef67e0b4825a1366db40c0770e023a25e86199a 2 @@ -14899,2 +14899,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 4d55029e440b6e90ff84d11861abed47381d961f8db1276834a71c121d6497ad 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html d6e16dd04aa5c8adb24d43c6c39d8374caecc37c62019e2f64ea4a9d33d7b63a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html dfa4f9efb5b3851c6e81b91e76f94f0f2ad7720aa6a2550013a98c353109c4f0 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html d08ab0985806590c0a2ad9db8f6258240e1d3848bc149a7d17c0cb15df839542 2 @@ -14915,2 +14915,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html 616460ac8a4cd3953d8bfe4932d24fac9bd0b29460ebfc0305db0efbc30597dd 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html ae11cebdee4adda7452e5032e6a24feaa1ba6a5c717f028aa0533b41072968e4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html 1f655b55e48810d2727287413654162997f4ec9210b0349ff1bc50ab911bd718 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html f672614fdc1adb21bd367cfd9def8201f30dc37777eb44797acf17a5e8cb125f 2 @@ -14920 +14920 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html 486ef6f47e5402f788e4845149124238c5a079d16e24ae509fbb96d4884e4b11 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html 2be08b2fa3e2edbc41bbbf6798050af4f23ba1466c8213e9b1b4567472bddf49 2 @@ -14926 +14926 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html a759597cd89f3786c2f797efb601c38a1f9acf31f99f99ac636abac060137da1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 4e62c4329b6414a3be08dfc2853c25bd0ba6f7b9c9ddb3c997e55ef3ad86b6bb 2 @@ -14931 +14931 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html f34ab4a576e3fece9fd5be9dce2172b6d28e6fc5bcafc976c5f21cb02cd734c3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html d799e3df85b4a8e512913b614a1a2bfb98c697e481e9308f18dffe9d33081171 2 @@ -14936 +14936 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 99276de4b462df4e5a22f0e1aa01e578fcae57402e325c20b956267bc636948e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 
2ae39b275e7ac9d6e5bc35f86fe862e68615d567bbdb8843b76f6189497dce4a 2 @@ -14944,3 +14944,3 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html a39989ee5932ee10592691c88dccbe0e4e7eedfae1e13f5c0ff645c1610856a9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html b6ec1cf207b14bfbc171a6af1d12800ac9cbf67c18abcede7909679bf8e77a92 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html beae6ef5524a06d442c316453e82dc2d7baddf701cc28dc7ccc6af4cd8911da9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 4fb880a4eb6b2cf6f9cb534e168f573d8a3808d42c3779b4f249f398685efae3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html c9b5d884d88501d4a78cdb70912810c8898fac9c19c527cc027dbd51cdd0c3f7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html 2ab80bc9b2db201c70b30b2bbaed7d741dea10537068d4a03ce5bc6e01199415 2 @@ -14952 +14952 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html ca790226c0b73ae95f5705c6cd21562eb05ae6f659b3d02b9bc39d5480da16c9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 6128467bcffc06de7b8a8ee07b311fe8c91fb513e5d54fbe21c8a14bb4431c27 2 @@ -14960 +14960 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html 83879eb0f57bff25fe9aabe5f297abfc9e2a611d8d8ef4a0deb96c4fd624aa30 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html 3df6561f76f472e1a54c9860693111ac77bb5825b6a62bc3b87ef536e44ef04c 2 @@ -14962 +14962 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html ad6428fba14c11a504beed0c8b4ab04da3a31ad2125e76397ada6c4dcdd81655 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html ff9e142ab5a298ea4253cea8d4f8d921d813cfacada531a36abacd279af86244 2 @@ -14965,2 +14965,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 
77c8ac12e1e1cd69fa15d7ce65cb8946b42aa71da3ad0130b4b911a53ea7b174 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 2fb0d7e68ffd15b7ad4e47b9d2b8f2f90babdc6a70bf69d6c0e8b514db6df65f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 6368c6c20e30d51f7ed02a5a24050275fef155b39c86a7e0b2ddedb101b131ae 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 7c26dcd7bd5945b74e286973698c0ffd28fd203393376304a648330639bafa5b 2 @@ -14989,8 +14989,8 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html 39e37b2a2c64eb7fd3f5b087c4ff2c3df7445e699e3fbbf69f5655689dca5cbd 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html 91c0b461a52f667b93904391faa5155a3544f3791a55e20bfbd5f932f565b541 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html 264d813e99ae7d4ed041576c396f34eb7a84479e1710b3dc81bb65fc256b43ca 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 124e2a76e86921f7d7f45b50da80ce29c02e96d740e86d0f5468b1c784ac2037 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 8a3a966b9c8b398de62cbdbb9476755c02a53c1cdd35f45b6c566f97940b403a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html e2d0b6213bd5d88ce6e845636ffdd4873f694d46575f531a03a9e94f4aad2585 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 97390f80e16ddf796fea92b887fc85c2e31c9b2fbb42fee60252f698219c28e0 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html c9091829060c46cc31e1e3cc666593426d741d3764aee18592012953ff91941e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html 33e2e7c4d16b0b01aa1c6536e948af651b3a41d892946279889f5cef00aaca9b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html 
9f3e0db11fa9452d1a16e3bac573c120f4b0309b1c82bdcb445dc1c78aeeef6e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html ca8d5f2b91985f783003af7deddd0b859cc238550fd9d90c6e41d0c5e28ff3a6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 5fe8c7a4197ddcd59f546a868365fb305917e80fe6e86c8e5bbd6f9deeaa5e60 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 5fbd6347a6b8c3bf7be3d2e0418dce3df31f78e4a658a10f41a6fc5d3a0e2999 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html f1c59ad7653bcf97e46d32b062a87be5b00ff2b65d1178ba73d523c825112b8a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html db202d8e145acce946f36500f151701a98dea8df972551ec07b68b4fc493c97c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 3ab75d380f8773e103e3cf526e7e98bf5159447f2b60267df943b907c867c095 2 @@ -15002 +15002 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceMatrixCreator.html 55ab72051cfbc2dd7eda3343646af857aae697758472c80fde7f9412f1589ca4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceMatrixCreator.html ed992db20a601b4cdaf881e4dcff35e9c99b2bb4df2a0d0b46a6d0d0ddd5ba58 2 @@ -15016 +15016 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html ab34678bc1ad1152cacfe79102c0b61415b1a34ed288d24e73f5e5bd5066c295 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html c766d840e917fe2f8e46dd323fa00ef4e1cef5fdb8e9b08965d54e8096ab963a 2 @@ -15020 +15020 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 8d54dd4fa9060a0340539a49900f40093574b3ff624c9c13ff9554f68b855ab3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 
9230accaedf38e5cf0f7ac2a3569a27d7dac557684c715f23fe84e7c40b12ebf 2 @@ -15029 +15029 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html 7d001221aca6c5de9ae515419e32f3c20360afde8e043b0bba0c06a748b56512 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html 29b355675d56e1b1e4754c6932b9c61d617de4171f7aebc75a421915c5047f7f 2 @@ -15037 +15037 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Elasticity_1_1Kinematics.html 18df4ecd197c858fa2b7534535d890c11d73fb6b334c2a37437d0fdea71633d4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Elasticity_1_1Kinematics.html 05dc68b33bc8487e3377b450c1a7ed2a285ff8c8e850c129bdfabbe4525492dd 2 @@ -15039,7 +15039,7 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 8a00c4724d13d4319ec43f957d1cef68ee13e3b39f7d0f0db7dd0f8fa7f264e3 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html 77ecb16b7ab352d20125538a8c81e6ded567783d813be3e86c6f06491d69c57d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 8dc10c38b3861fd4f2f5ef614b0cbd3e24b3b52c7b8f9762399bee635737dd8d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 7ea48f7d48bc3d87c03117874f07780205b7143f99be1845e7574277f202df0f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 91a59aef9388b331457b53f88db5e741bb8033947a1dd8f503dc94986bf96ac3 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Rotations.html 744dd3460b91696b474a2a4aad6bc1e7cdeb736af3b8fd6e9d99793e03b68a9a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html 3e39c0c6e5b3fd26db395c779622132387ad76ee24229d1f2983aaae474d2e5d 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 3724ec9e9362e38b2446e0ccf6bcd6b89c552403e4627d77ea3aa1508aba6344 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html a36b8665158efe66b6f927c2e8610c7b64614b10bb728ad62d6fcbb180594a3c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 2f14413ae1a5ecffe12f6f6e1e425cbd50cbd31dc8ad12d95940bb759ac67eaa 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html fdac09aa9f25228985f6ab2fd225162126af0a4c048241e37b48a2eb821cf008 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 0252dcd979df8e5b39258f56c255a474add8929a6f28f07a0cfd2cdcbcea4a46 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Rotations.html a819d3a08a753669e038342063607b8ce868eb1e9585c7a5a9e63d59cf0aa605 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html 61d6ad379a2d0bbc9cc3463a4f7f66a854b3cc7b6cbadfc3e5434adba3c8c094 2 @@ -15053,2 +15053,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html 7472179f7c18ac4d9ea8c7e5f9c2d54998b7ae1adae54bf2577eaafc08aa460e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html 01ba175258e52234c5487c1b39f216b88aaf7b2d402e692a728ac463c661ad59 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html c50d96aa59db576898c8c05ba1984c056eadfeac6a203a0aa21c102aaae1e4eb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html e0481e41b256f975a41a778d29f53f26a5615dc2d9558324af23f67fa0f624fb 2 @@ -15058,2 +15058,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 9c8255f45676b2d7efc0974ec0e2478e42d36b89fce159e588c92ae86a47f690 2 
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 4fe2c275980c34968f13c49b9e3dc7f32ffa9ad0bd0173f4030a08134b6042a6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 9eb4448576d242bc92b18e2bd08ec687def7bd8e03f0404837532080dc0f8ab1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html a8b40f0cc1e1fb6fbb5be79e00c3a79caace05feab0ab243a4e82b41fa265906 2 @@ -15061 +15061 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 1eb7df3ef256fe934f9b0c5b117262080b8491e7392693e8ce1b1b5cc7bd8c42 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html ec50d951dac2be06cf3d17dd0a2a889b0cf807f9afca673dabfe7a7559d4f7ee 2 @@ -15066 +15066 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 29219bf0ba9526a9ceec05d6d0a513eb9d9ae608b5634bd4f7aec0346cc66397 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 71abdbdbf61fe6d60d41aa711555c311b857fd8d157d741e4fdae4693703c4af 2 @@ -15089 +15089 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 39cd0cc03fd050790482298cef89b16d8499bb44cc006cc399c05798bc09d26a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 1284128b98e9b3a6ee9b3af25a2323f7e36ff4cf395aab45546d2130648be276 2 @@ -15091 +15091 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 3b0214d5f86ea9b0a154b9948348a03c52cf21e8f1b193ed2d1153e620b60fd6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html a24f85f235223e4ef851c5deda07552d99c8c830f428244ccc79c48c09b0e4fe 2 @@ -15102 +15102 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 0a2dca1184772abb7ca671e29ebb48f99b167d91f0f17c05a529e910361d3ec5 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html b0bca7bebc11d4cfd2797cf848522e2e38a664c78004437fee149aa5e00f8c20 2 @@ -15122,2 +15122,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html 2ed1fc94c423356024694d967da709de6488d59c0ff734e30934e24299e6f12c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html c0f9e15445ba29aac902534fbe10f93f0ff9729d0c81f72a5df09c48476a66d9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html d01588fc080821a6c7962bbce5e154a0b370090a2f9bccc6a3ada053a15bcf23 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 43a61f57ba06979e0ebb282d1f6a8429804a0283023da8051cfd14f77252a957 2 @@ -15257 +15257 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html ed6abfc2114495fc36628fd7bfa248da5cc916d7db38fd4d9cc828f2c3608719 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html fe28dd45a22e8a909f5d0b3fbaccf0e7a02417a073806b0477b2347ac4489074 2 @@ -16330,5 +16330,5 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html 1a3dff9930478670515dd5aef8710955e0ae379dcd682ff3365e9a57ae9098c0 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 1f506871d8b15453e70e19577f73bcd22ad459385cdbe986380441c84fa1457a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html f3b330d8630cb6d934c7e817c57c86ef2e3fcf9451894d03517cd83aae06063a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 9209f1778a9cb3e661f34575d9b6a3e055505ff7be7299fad5b602cf2742fa64 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_12b.html 35e75d6915d57db5bfb305173a0cabdda7d871d0023b61fed889daad699de833 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html c87cd1c36ad2d5905c0c2d9a7f691870ccf29ab9b5db1a993d0887c3ff5d41fd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html b65aec03ee427de8cb4c4a3d43be29146a2635773e5ff5524f5ec0cae3c65811 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html ae81aceea6feacd8445ac9641ec1d53b1e2a5f02be1d332413f322247a6c08b1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 3091f2619aad5d50db86a1d860fca252b9c926d95fb7562e21fb8d435987ff20 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_12b.html b9488a2e7333071bc39e969b4cd0afad8b4e015cc318131976640fba32518f9b 2 @@ -16336,3 +16336,3 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 1a3af43997680f73ed93a2269dfc50d278a55d82d844311a454b5946197e0b93 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 9285b7acfbc0a707ccfe01960013ce8420b371018a7d1b1b4208c57b1edd952f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 01d04c057103576ab7519fe7681d1adea68e9e02f4861fd61be244155b50a90f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 985ad48bec533ce5a393b552e040a4fdf1cfb514905287ca0f054bd2bee4f395 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 2c9a9a5d10da44a88b04306b27e8e70208e0b3171fa6a14afec0e90e477b1532 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 9a0b7e2cd663ec73677162cd43f9357c2fea4f8f566ce338640c01e935bd25c8 2 @@ -16341,40 +16341,40 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html bd0dbd51cc6b1324acf5dfe311188321b32378b46582a4794c3efa2af6eb9995 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 6bbe501280eac806146d5dc53ac2afb7e3330439233954bd6a07a0b8c0f28939 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html c48a7aa98bb9755b36c6a075b5abe16bf118df379a0efe5cca49ae84f4c25085 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html 7eff4749a90f640372c87692dad82b693dd37339d7e8401dc5427354a3230093 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html 6b7a7584269343914cd5d4bc0e2c868555ee5cbcd57057fcebb7b6713cde22e9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html 91667a2a71a0817ae53d8c7a24ba04b3be183bddb160c9ca47dc855451417d81 2 
-/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html bc3cfbaf894e74bb22644d138e5f92b6b8cc10fc7394e79f7ee7280d6128767a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html 8829f0588bc54138b43684849fa9299a5faa5c73bf215a45c25899aede71e49d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html d233748819569e4629558645f6029d8750cc424aeaee222e60b1f46cfcbcc1fc 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html 23118be4292e19a8c81634093576bb8f47f4b0c3e8c4bf40011a557f3a609c37 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html 694c73f9e34f738afd58c696d942da21aa48a49240581fe56c3df0bdc3419594 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html 8fab060ccaa8860c95497218f09aa95bc6f9b1d85d1a322b46225db75f87c2a0 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html 7a1c757b0984321874932ceb6420c7f2b4cb565d8d07e2144c70e26fc05fe463 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html 5ae95251f05d6b581bbfcfce7db1f0cdf0d43e550e416758b96118500e6d8d2d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html 6dce30bdcbe0edb8a17d124bb449eeaf0d0045dc915368fb74ab7fe315b58b4c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html 4808e5e3bb517a7e92e1e95bef46c4aa126198e89dce603e8b02101bf9af4f08 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html ea2afe4a9951ed16fb5538890c666c7d710e273669bf2faed7581597df763446 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html 7c4c046c44f10f0535add28d0bc24f47fb62e597fa670b5debc4988a1ef93583 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html 9157bdf8028660ff23ca33476972cf09b4e7099a1e780bca86553bdd0aae229a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html ef4ddafbf5e86521e82d0b49fc893d53d1305fbb4fa1b6be45c5c87de3be9e2e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_36.html b939e50ba35614ed24fc6daa10b9fe955ceb2d80ee7d458289a12bca0524d800 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_37.html 
8917b810648e5f5529c2bcfcfc7bcf2cf20c7907707aaa3b8baf696e702276b1 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_38.html b15b5e6c78c371b3abff5132dfee3a6d2dda627fddafbf33cd444ab5f2ece961 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_39.html a2c504a869985b152b1f479603e4d44e8866baa634787dd9e8ee1aa1b310182b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html 1b65f26781012eb3ba1d43f3ff78307cc9ed0794dc4d14f0f49814cac16573ce 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html 619de046b387979dcfe1b8fd4e1aaf50cfcdd1881f648c1e77ba3ab5c04d0e43 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html f28fe7f72e470024fd02bd87fea6073af1fff035a162c9a3a787db5a7dd01e8e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html 59550c8e3a105149b13722274075a887b628602b527f8914fa45c028decf6fd1 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html c3f713662faf1076fc358fe188317a8cf78d42c0dcbf5a283d8e6f0131a913e2 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html 5fb81c83e0c7c7b7cb42394296e20629c5117f11dbbfbfd29863e51a54611f5a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html 18eb6c253c29125a4e38ffb239c1c7a51ddf4c9279ce062b20d403dd55e392e5 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html 59596736d568f23648fdf3b2241a0a34449ccc05e3f2592b6e4a9af1f661b912 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html d460e43571d5a5a8a38eeb75555a436aab87c1ff2b29949babce53923110dc17 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html a3755e4dd6f04dc7d8599a3331f3d631d7a50f55b718570a324402c8db0c646c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html 2b23670d54bea8e67101271dc8f9cf3a021c7868b1b74c0b26c424e7cb3836fb 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html 3d2f377fa4913c67d1959934f36ff287e7a2c73d923369d35174c0dfc80ade28 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html e8c27c3d6d908f2899d2a932af2b8612e8f85e674620970703c5c77e1e957417 2 
-/usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html 2f187f78763c2ba23f3278b5db0a54d85d1d4cc3ac65ee80cde4155bc05fe642 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_52.html 3f8f090343944df92c68944b62ff2e7352d1c6ef624fbdef2494f0373126c080 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html 9b0d184895c6694232aa9193c2a017cd7c953e752086ddb8b2ababfcc4ab0b30 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html c106d7c79a2646898080637d6e7d62d9dbb96bc1a8012dd06b967e54532f054c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html e485fcdee498919e503288db83dd42e4c418605a3b15deb00880cb178bc49621 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html fb01f5506837ba02900a23abd332e26b115962a2a3dc0e97cdb37f4ffc7f902a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html 84109b84d204a42dab0ae068c0fb3050337813cd909431536d8f36edbe9c3861 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html fe3c501fd0f8ef303080190f5edc9444d8f0723ec41a56ca14458b0dd6e316c7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html 125b9fc26404f6258892cf6c9535809de5cf64cc03cd38d7fee33df51093fb30 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html f261b0612a30580cd5a35c60e9a4e14c17fa5fb815b90ef9d2e00fe97eb7c5dd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html 412b978714c473fbb8ee26988c229ff04198d3f1a344f30b866c91fba036dc59 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html 2e4d4a9c059b254e6c264026cbf15fe6cb9280022b446c609f326ae03e17206e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html a2a7165034bc10e68eebf1261e9f8dfa8ec6e15945420c15056a686d0229a2ce 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html babd67ddc3599346483e4982dfbb57ae207ceab788de3329de42eb561dbb2292 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html eb5fc560bc25fbe889bd38f8bc7412ba4e92fe9fdeebbc9e468dbc2ea1a0725d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html 
f80d42ac62ce8a670939b91955e2539e8589bae9f835a028cfc9c535364485a9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html 02dbf1ef07080a7458e6179596cb0bb5783b3e94eaf7e313a614ef8d6564ea4d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html eecfd6ec1b1d6c216572285cdbe7a769ccc6d7fa7a7d606d816ae3da0ffdf426 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html d6ccf7f3202beaafa13b38bbd3d1c415e12915b15dbacf3b79eef84ea18a96b3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html f8c791fe08394168e650fab8f2433ea1859ee144a119423ecf6ee2f5f6b0d787 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html 44eec43779398390ddfd53a1da295a4b2e1193d2d1af16b4e524db3d51bdbf14 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html 386650fe72dc48dd3dd15328d4bcdb7328b57c19dcad1a78df25cbd4eaab0f6d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html 10942d587d7ef4bb06611f9a2618477b0ae224e00045460f7b0ecbe5f57ab12b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_36.html 680ac711cf0d29d36d179add34428ce269c680ad4b4dcdde9270641a8c1d9edd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_37.html 9f75c743bbbe90ed3486686f53d69b3885f5c802e73abfc2b321f2a6e774f0ae 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_38.html a7dc69917784b29cc3ad15c3b1384f170c08e1c199a7f4912014403d2079586d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_39.html 93e3fad41312b68d1a3c5d6714e4053b2fdf2d00f3361c709a8a0ae31844822c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html 684d20e1c6e9fe30a248cfe6506cb9d2a7ec83e9a632494ed7efebab8737e34b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html fd0b1c15a123c7ac7479a4071e4f6c382eeab9769f1318bcebb2bca682b0f8b0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html d719128c4d4ca193bb8ed01d0b4759419f1ff8a8327a3675052ed20ebd6cf4db 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html 8352c6e93f284f7f71a51a8f887c35ee513f09d8d5ea1b12b29cf0b759070cb7 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html 19a0507a01ae89b9e05b31b6dcf2191b56b724ea40518fbce1e4cafafe7c74a6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html ca103b2329ea4f211034d058c17e289a1eabe79f5f220f7e66cd27d17bc66029 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html 87ef020733bf1d31be402ba75d9e882d0ab5fa5270aab47ae0af95391ed0431f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html 581b5cc84e00d58893e99cd356aa5124c571f93bf36d9db4105c4adfb3bed5d6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html 218cbced1283aee10530dbf7a4c5a0b80c169b220662324aab26e22b63adf44a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html 13d4889dc3859149e304c617830f5f9c9ab321b041ffe12b8ddd5b4e611e30f2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html 14e6cfc7c42c6dff66130ef99ec23f17a0a21d21e1a0e42b3e5dab7494e0b224 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html b224844149abe0b5b89b0ae3714342f9067703c2cdbd6ea78dd0e2874aa15734 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html 3930e12927202477ca3d761e60be271e5d3c78960f8daeebad585d29612764b6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html c32184bd54f49c436d8450e5cf751405e563a0d19e124e73716722f46b694cd6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_52.html 62fd01a3ffa06acd6e7a76e5d1fb5fd858b3751fe66c2e31fd496453694b3e7b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html b6b8e4ae99595edf11434342958520ef7e3ff797ba2279f84c54cd289958c93a 2 @@ -16382,31 +16382,31 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html 0c00872505f35c7c670ac566288a72cd982489b2adf190dc1af3fb5cba926408 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html df6c518d6ecf8ab8d4e1c9847beb94ed7b9bdac5000e3d1a9ed431f06059f4b6 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html d0cf70b83ca339807e55982847bd7c103c3b43e92f8be3b0bca0deb049e55603 2 
-/usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 8e65d66a9af3ef33df88429e9e17299789e168bf52e59db3046e60e694f3d499 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html 614cd4532cfd1faefea5287c1ec16318f0492800cf6077cc691ba5cf5c2bea27 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html aac9e92d11b3492d5c49632302e1a1bc69d8e606f45080a0e7d6fe5a7dede261 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 48677c8f79ff4835c04caac4b2226750d7f81c019a91d10509c5bdf71d9930de 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html 5dfbf01a9b53981f00fd032a4ba5cfef9a28e8788f9da5179116e61d525a0bb5 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html 5df95bda70102725f4d6be108af2273041a334a5f6fd5d30d264ef04d8d200b1 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_63.html 7313b4c069faad538d915fa9f6300ee1e6c940093208df903ecef6c055affd40 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html 7b01009a11194efc7f20ccdc1d3e8458adb9c93eff2eca0d62c57ffa54e0944e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html ce8b7f2065b4fac978fda765a53625677dda16f0a497a85b62a3d8a8ee6dc4a1 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html 8214233817342c692901981d13dee94d958e780c24e13b094ed18507fd5f8716 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html 940e74ea8f7a094812c127871d8e36aa3725e790d113aa6311d72b5f6cc3d343 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html c76a35fc7633a354300acef26d2734613b1b63d7f17856715886b709983afe51 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_69.html 2f355ff2b93b7feb1de214a90aa4bbc7a70eee7b3bf0103f528b159eaebe942b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_7.html b2f228df1c0cdfc928abaa75bca5292e710483ebedd1f4a26e3119969be7f4c4 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_70.html 9445f9e2d1e8e4b8cfc97958127cf41daece266ff1d820721933e709dcd98104 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html 
f1fa548b757f0bd8c2d7c7b0f68d4485e72e7927037ca47ecc87d1f1e26acc40 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html 09dd5aad8e09d0b0d2a3cb00136df2ea5573736597892bce7c70dbc933587f77 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html 8423872901848571caa25856aabcb62bbf5f5d9fa5dadfdcb0a41d8525da90ec 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html e5ac5cc5af06cf2fef95c539ed0986d3228bb1f18f857715a46c48ed1aaf8373 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html dd070de19533bfe74acefc0d101549170ef6066f123352b8d192401cff727696 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html 83578257a3cca7f34417b4d2df5458877a4b4da6387ee8eb10fab1a9d294a707 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html 97617d6bd1a68d9db466e1a897f8eadf3790b09e579d0b1f81331a99de0e641e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html f3923ad108227433eb828734a3a74123c9f1bca1067dbc5b04d371c1402976a8 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html 93d3cee5b3f36362f9f268394c2f0bbd9d57cad663529022c9fdf894c92461bf 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html 5d8c58336af9b2be1c4bb167debabc5fc634b8b11ac110352dedd5f7eeeee093 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html 95e1b454cbc8f81487ad8847ecc7ac2e1485ad4fcddec12823adcc318d00f423 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_85.html 17d109ca493aa088dc8ec32bcb13c6ddf4b48e3b92a51166e2a2f4b07dc6e2cb 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html f1192fc65a7f1eb5c25ead292cc82886a5a1b93208a3fd922b8fb7c456cc81a0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html 01a8dd73c1f4da2abf5f21adefe042477cd4251df435ef04e6005a9a5fd8963e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html 5c9ecc1a96282ea05d03b560dbfe7def8753079ca0c149e47361345562163a61 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html d056c5e71abb25efd27600bb26fb5109d9f276923c58f05ca5b9ea884dec85c4 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html d990afcd6552257ba7d3c2d94355b1f22eb0dba484999c05ff9e5c86c4a83b13 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html 59a78bc69f78311d1ec260691fe1a0b476508f44b6f9266c29ff893e5a53284e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html a81666e1d12049f19be1f5d23f83824a783f234a943d6e5b2ecc8b950d78041c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 4944e8cbf945127c4acd5ba59ad15bf72f41e45e41e0878dc5af1177464a5a28 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html 18fcfdc1c417bfd61be152d428a4079b01f8e7d51c7058aea2522d3bd27d4189 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html 42a84357cad3df1708b36ce16c822ca9b4cc03fe70560318aad1228095f607af 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_63.html 785aa1855ae6a990e394df2620e1a9ab1e45aba1ba209d53eb930e1c88710fc5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html 15f2f5ee78817e25cb55016bc35c2b056f7290da2ab3aaa563f0c5b581d311c4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html 9a4eca0e4fae5de5f2f2ad09d5611df3786e0976c530a8dbd7616a2e49bf117b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html 2f1eb659b3628152130af800f023e71596fb62c739096d5842da3848c7902c64 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html 3c0b1b761d01a5030c3a266143a627e7d41b2ff3114027fbf6299de4f6bd43a2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html 31a9a992c9c7dee1b9070f9a15421a762bb5b5c3697e9e454e7975f277cfb9e0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_69.html a06d388d985a9331b64ca2c2e472520cd97945c8b228184020d3622de937619e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_7.html 3b7669d35ae6457e878a7ab87d8f1b54f69c55217b78123d2de60da073b1720b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_70.html 39c4ab74c426473bcb2179ab7b1231484af144893fad35a5a3d84fa47f41fbf6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html 
7f396b6dddcfd2f3506dc5bf77ed7caf16ab30eb0b7bbef6659c9e9b88c01fd9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html 147382cf3e6ebab20b9bae2d5f7dedb06ae77b988d157a67f549a1f58fce8998 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html 6c39bbfff65317f3d6646c4221f79eb55f0d176ee3bdd7f50cdf7482582bb534 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html ae3334f555aa2ff6a6f004e20c237aae02b523574d67f4ee16162fbea789e7b6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html b3cfc2b1c05cb4d22d4eacb77fb288944f137be8cb59e5584fcd3cb76d9c83ef 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html db7168f0e8d1302e1baa9aace2fc58456834ab8ce4003723a22548150d267a10 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html eddb790f183f30cd7e0378e56bbe261463b563381799645e3c34ce3c030ca3ba 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html 2859116cdd79927a71829ba099fa4b6e8dcfaf198a9683a8bf4143501734695d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html 14d86987d50b68f2a070d13daacec44a8daace6015b504591ce0c2cf1f4ce32f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html fcadb98ce964e9c41d4f4425bf7741fa74dbc01854da9252687cc87ca1f362b9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html 06367770e43b3fa255158ed23f3b507810635367d02fde4f248097ee750db37b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_85.html da0331f5f0ac8bf58a99cdd577feaa7aae73821d39ac41b041d2810127bf9cac 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html 9f9682ba0605a3173c78531ab8b681e76d78cc2ce81e6353f1e007c1f931814b 2 @@ -16466 +16466 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structCellData.html b5e40c6e5bfd3b2214d79f08360a83dc8c4836557d17d2bbf9c6a4fdff4fefef 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structCellData.html 8bb6375e8661f0031a3bb82f32659eaffa4bfb4e79ae2f18bae19bf41c6f5ec0 2 @@ -16471 +16471 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html 
cdcbe8611016c7991c3df21dafb71b3b7ffa3b41457b4413db7fa81cc18bd919 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html 3bdfe1f6c48b61ee36e0a355d17655362932da4a10261581474c442f46b0199b 2 @@ -16522 +16522 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html 967c374a6afa0a1ee9cf98c0b3ebc06189748250a9fd79cc9fead1cafd91c4a4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html 4928c6122c5f1b948e556807af29f3bebb215939bef31069368512f3a4788033 2 @@ -16528 +16528 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html 98722442c1c0e5b81ef2f1bb13133aeec8bde559d0e04c360c01e2902aeb3142 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html f0d1c096472bef8493659bb9deee084269233f60bd1ffb7714fc74c6f60aab62 2 @@ -16682 +16682 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1Scalar_1_1OutputType.html 1e172ba66f0b06578da06c749ba1c067e32bb7629dac3ac023aeda4e0b4a7d07 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1Scalar_1_1OutputType.html efd223e38235e0c4a351a1c1c7b718bb4be6a1e46b628d5dc66bc04529707d8d 2 @@ -16686 +16686 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4_1_1OutputType.html 57d932be6fcf51a792f0fdba4a41ef3266460e25dfa4e1d4e6c34e311996c7ea 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4_1_1OutputType.html aeef2b33a9363bf2b9a0e75bcaaa2d1f2563e6dc5efe8fc1a5f0f8cb25ba0f17 2 @@ -16690 +16690 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4_1_1OutputType.html 00b55e288b0539b39b34ed03584916d8e712d1f63ca7a8ede9d64fab81e1c858 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4_1_1OutputType.html dd93902e6703f6ebe1915f53397866174ea1c3ee355fd3862488dd696cc179c6 2 @@ -16694 +16694 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1Vector_1_1OutputType.html 692b16cdbc685656f1f1ee843c333a0d4b27db4c7898842051227b3b0a7250f9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1Vector_1_1OutputType.html 8e0e98e95a0432bb5bb06a94c5c0ca3481b001730883fab967e5cb12b4770fc1 2 @@ -16704 +16704 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html c5ec90c96769baaefc23724fb9aa71e597db1088a51ed57a854b8d7c0e646df8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html 2c4cc55e46c1f9dcdde1d1c0aaaa8782780b0cba6d7148505f7d60977b80239b 2 @@ -16752 +16752 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structGridTools_1_1CellDataTransferBuffer.html 67299af81682bec49f4a71b07b3a3d5f14ec368eb15ff448a4843104823914cf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structGridTools_1_1CellDataTransferBuffer.html 168b123dc4e7f4a705d2ec8d1a4a8bc6e53acbc345fb7f44bbf658b02f5b5a92 2 @@ -16825 +16825 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html 2dba0ac5e05a5157bbca3819429ffe56bcd8367de95354cd41299ecee6327db0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html 08b8bc9422b78db25ce1057a5b88ea7c5dfa7b4b9404256f4a312e0def020028 2 @@ -16829 +16829 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html 78072fa0efd4821045ec8532ce2e0bd93035dfd1f36bee49e1091c28efbda396 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html 3e293e6643155d7e55ddaf1816161ff7953e5fc7dfcee1e2dd7d63ca20847949 2 @@ -16833 +16833 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html 252eb004b8cd70ab8f25096c6fc1bf7cb5271c9a2c4bf316d66dba80240bf605 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html aafe027411e1bfad95d9d9dfd907ef3cb8b4f7247d291b8042df4fb25564da72 2 @@ -16974 +16974 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html 6ba1bd2f2bdc3da8d57675bb650fb0d7f90973ea6a4bcdac03b95a61ae6f4347 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html 8a5127c05007a27b6ecfcd5b5d0fca8fd2e3657d367d1b9d1acde8ac542bd1b4 2 @@ -17005 +17005 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html 8c00e16d72e70d1c320ea8664ace8b7ccaacbc8366d5333bcda30e5c1d005c61 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html 691fde1375635947f048b3a63f1bf410933a8f0a8bbe6c78dca5daf191fd4fec 2 @@ -17050 +17050 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html f8f4d850e244aa82070c6bfcd58f52021545b8f92d7169b769bab9d3e7b44053 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html 701f08433c083a03612ab584a08657ec255005f2aa2b3aace161dc174feda5f5 2 @@ -17127 +17127 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structTriangulation_1_1Signals.html 4fca73ca74cdb1530e74802c603eb3cc93b07c5be15b3a0d398d5b549ffc6e4d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structTriangulation_1_1Signals.html 9642d2eadb09b514baa91d0bc2a5a4ff616a86e13b2b3b30cf58b6d35d6b0f57 2 @@ -17415 +17415 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1MappingInfoStorage.html 92db71375a04e54361c6ef93c336f983326e6d91a588259445c1fe1bc5ac80c7 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1MappingInfoStorage.html 804d256a2b9c7f601206106bd1d64e2e2e579c5ac7b581f714842787d950a124 2 @@ -17426 +17426 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1UnivariateShapeData.html 72f04ecc0405a78c71ab362b47936b819dea10827ff45c3e3ebae90239411b35 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1UnivariateShapeData.html 25f596d948ca8e25c14fcc74ff5dad8fb26572dc19599052d22f54fd2b8f728e 2 @@ -17954 +17954 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html 5333cd1ade0a8a9e706b73160fcae4d27de9c414d216ff941c2785576c579b17 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html 9146108d01f07f7f2f7bef1ca6eca5cf808bd23e62fb44cab60c555ea371643a 2 @@ -17958 +17958 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html f94c25fcd1ee487f46cc72b23d9fd2f852ab424655250a563db205317ef7d950 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html dda578d325a4090fe4d1025f7230971b0b902bba7510c3c008c98a9ef42f95c0 2 @@ -17988 +17988 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html 91fa9ce5337e933b3b6e5a91e75accd7b76e25148ebd45b42f46a00ba8f16897 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html 69d33f1306858919956cb4be629b155434c4f2530f8e6452346ec4a2033f6512 2 @@ -18050 +18050 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/todo.html 92eaf8eb695eec6698c9d0fdda08c8eca4852c4f235eeba7a2a25bc65723d31d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/todo.html 7d7141a4b98276625b76cd1808c201dbbb1fde581d9247a38ad4648642fbfa4f 2 @@ -18269 +18269 @@ -/usr/share/doc/packages/dealii/doxygen/deal.tag b5b8a96aaff22f459ffcd9350a437017aaded9d487faee76c323352298b016f4 2 +/usr/share/doc/packages/dealii/doxygen/deal.tag 27d9afd3fdc449ff1a1f09f931167a7a699b94e80c337a258aedcd6fa4af4181 2 comparing rpmtags 
comparing RELEASE comparing PROVIDES comparing scripts comparing filelist comparing file checksum creating rename script RPM file checksum differs. Extracting packages /usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html 2024-03-17 21:57:16.855070069 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html 2024-03-17 21:57:16.863070118 +0000 @@ -103,7 +103,7 @@
Block (linear algebra)

It is often convenient to treat a matrix or vector as a collection of individual blocks. For example, in step-20 (and other tutorial programs), we want to consider the global linear system $Ax=b$ in the form

-\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \left(\begin{array}{cc}
     M & B^T \\ B & 0
   \end{array}\right)
@@ -114,9 +114,9 @@
   \left(\begin{array}{cc}
     F \\ G
   \end{array}\right),
-   \end{eqnarray*} + \end{eqnarray*}" src="form_92.png"/>

-

where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B$ corresponds to the negative divergence operator, and $B^T$ is its transpose and corresponds to the negative gradient.

+

where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B$ corresponds to the negative divergence operator, and $B^T$ is its transpose and corresponds to the negative gradient.

Using such a decomposition into blocks, one can then define preconditioners that are based on the individual operators that are present in a system of equations (for example the Schur complement, in the case of step-20), rather than the entire matrix. In essence, blocks are used to reflect the structure of a PDE system in linear algebra, in particular allowing for modular solvers for problems with multiple solution components. On the other hand, the matrix and right hand side vector can also treated as a unit, which is convenient for example during assembly of the linear system when one may not want to make a distinction between the individual components, or for an outer Krylov space solver that doesn't care about the block structure (e.g. if only the preconditioner needs the block structure).

Splitting matrices and vectors into blocks is supported by the BlockSparseMatrix, BlockVector, and related classes. See the overview of the various linear algebra classes in the Linear algebra classes module. The objects present two interfaces: one that makes the object look like a matrix or vector with global indexing operations, and one that makes the object look like a collection of sub-blocks that can be individually addressed. Depending on context, one may wish to use one or the other interface.

Typically, one defines the sub-structure of a matrix or vector by grouping the degrees of freedom that make up groups of physical quantities (for example all velocities) into individual blocks of the linear system. This is defined in more detail below in the glossary entry on Block (finite element).

@@ -135,7 +135,7 @@
FE_Q<dim>(1), 1);

With the exception of the number of blocks, the two objects are the same for all practical purposes, however.

Global degrees of freedom: While we have defined blocks above in terms of the vector components of a vector-valued solution function (or, equivalently, in terms of the vector-valued finite element space), every shape function of a finite element is part of one block or another. Consequently, we can partition all degrees of freedom defined on a DoFHandler into individual blocks. Since by default the DoFHandler class enumerates degrees of freedom in a more or less random way, you will first want to call the DoFRenumbering::component_wise function to make sure that all degrees of freedom that correspond to a single block are enumerated consecutively.

-

If you do this, you naturally partition matrices and vectors into blocks as well (see block (linear algebra)). In most cases, when you subdivide a matrix or vector into blocks, you do so by creating one block for each block defined by the finite element (i.e. in most practical cases the FESystem object). However, this needs not be so: the DoFRenumbering::component_wise function allows to group several vector components or finite element blocks into the same logical block (see, for example, the step-22 or step-31 tutorial programs, as opposed to step-20). As a consequence, using this feature, we can achieve the same result, i.e. subdividing matrices into $2\times 2$ blocks and vectors into 2 blocks, for the second way of creating a Stokes element outlined above using an extra argument as we would have using the first way of creating the Stokes element with two blocks right away.

+

If you do this, you naturally partition matrices and vectors into blocks as well (see block (linear algebra)). In most cases, when you subdivide a matrix or vector into blocks, you do so by creating one block for each block defined by the finite element (i.e. in most practical cases the FESystem object). However, this needs not be so: the DoFRenumbering::component_wise function allows to group several vector components or finite element blocks into the same logical block (see, for example, the step-22 or step-31 tutorial programs, as opposed to step-20). As a consequence, using this feature, we can achieve the same result, i.e. subdividing matrices into $2\times 2$ blocks and vectors into 2 blocks, for the second way of creating a Stokes element outlined above using an extra argument as we would have using the first way of creating the Stokes element with two blocks right away.

More information on this topic can be found in the documentation of FESystem, the Handling vector valued problems module and the tutorial programs referenced therein.

Selecting blocks: Many functions allow you to restrict their operation to certain vector components or blocks. For example, this is the case for the functions that interpolate boundary values: one may want to only interpolate the boundary values for the velocity block of a finite element field but not the pressure block. The way to do this is by passing a BlockMask argument to such functions, see the block mask entry of this glossary.

@@ -164,14 +164,14 @@
Boundary form

For a dim-dimensional triangulation in dim-dimensional space, the boundary form is a vector defined on faces. It is the vector product of the image of coordinate vectors on the surface of the unit cell. It is a vector normal to the surface, pointing outwards and having the length of the surface element.

-

A more general definition would be that (at least up to the length of this vector) it is exactly that vector that is necessary when considering integration by parts, i.e. equalities of the form $\int_\Omega \text{div} \vec \phi = -\int_{\partial\Omega} \vec n
-   \cdot \vec \phi$. Using this definition then also explains what this vector should be in the case of domains (and corresponding triangulations) of dimension dim that are embedded in a space spacedim: in that case, the boundary form is still a vector defined on the faces of the triangulation; it is orthogonal to all tangent directions of the boundary and within the tangent plane of the domain. Note that this is compatible with case dim==spacedim since there the tangent plane is the entire space ${\mathbb R}^\text{dim}$.

+

A more general definition would be that (at least up to the length of this vector) it is exactly that vector that is necessary when considering integration by parts, i.e. equalities of the form $\int_\Omega \text{div} \vec \phi = -\int_{\partial\Omega} \vec n
+   \cdot \vec \phi$. Using this definition then also explains what this vector should be in the case of domains (and corresponding triangulations) of dimension dim that are embedded in a space spacedim: in that case, the boundary form is still a vector defined on the faces of the triangulation; it is orthogonal to all tangent directions of the boundary and within the tangent plane of the domain. Note that this is compatible with case dim==spacedim since there the tangent plane is the entire space ${\mathbb R}^\text{dim}$.

In either case, the length of the vector equals the determinant of the transformation of reference face to the face of the current cell.

Boundary indicator

In a Triangulation object, every part of the boundary may be associated with a unique number (of type types::boundary_id) that is used to determine what kinds of boundary conditions are to be applied to a particular part of a boundary. The boundary is composed of the faces of the cells and, in 3d, the edges of these faces.

-

By default, all boundary indicators of a mesh are zero, unless you are reading from a mesh file that specifically sets them to something different, or unless you use one of the mesh generation functions in namespace GridGenerator that have a colorize option. A typical piece of code that sets the boundary indicator on part of the boundary to something else would look like this, here setting the boundary indicator to 42 for all faces located at $x=-1$:

for (auto &face : triangulation.active_face_iterators())
+

By default, all boundary indicators of a mesh are zero, unless you are reading from a mesh file that specifically sets them to something different, or unless you use one of the mesh generation functions in namespace GridGenerator that have a colorize option. A typical piece of code that sets the boundary indicator on part of the boundary to something else would look like this, here setting the boundary indicator to 42 for all faces located at $x=-1$:

for (auto &face : triangulation.active_face_iterators())
if (face->at_boundary())
if (face->center()[0] == -1)
face->set_boundary_id (42);
@@ -240,7 +240,7 @@

Component
-

When considering systems of equations in which the solution is not just a single scalar function, we say that we have a vector system with a vector-valued solution. For example, the vector solution in the elasticity equation considered in step-8 is $u=(u_x,u_y,u_z)^T$ consisting of the displacements in each of the three coordinate directions. The solution then has three elements. Similarly, the 3d Stokes equation considered in step-22 has four elements: $u=(v_x,v_y,v_z,p)^T$. We call the elements of the vector-valued solution components in deal.II. To be well-posed, for the solution to have $n$ components, there need to be $n$ partial differential equations to describe them. This concept is discussed in great detail in the Handling vector valued problems module.

+

When considering systems of equations in which the solution is not just a single scalar function, we say that we have a vector system with a vector-valued solution. For example, the vector solution in the elasticity equation considered in step-8 is $u=(u_x,u_y,u_z)^T$ consisting of the displacements in each of the three coordinate directions. The solution then has three elements. Similarly, the 3d Stokes equation considered in step-22 has four elements: $u=(v_x,v_y,v_z,p)^T$. We call the elements of the vector-valued solution components in deal.II. To be well-posed, for the solution to have $n$ components, there need to be $n$ partial differential equations to describe them. This concept is discussed in great detail in the Handling vector valued problems module.

In finite element programs, one frequently wants to address individual elements (components) of this vector-valued solution, or sets of components. For example, we do this extensively in step-8, and a lot of documentation is also provided in the module on Handling vector valued problems. If you are thinking only in terms of the partial differential equation (not in terms of its discretization), then the concept of components is the natural one.

On the other hand, when talking about finite elements and degrees of freedom, components are not always the correct concept because components are not always individually addressable. In particular, this is the case for non-primitive finite elements. Similarly, one may not always want to address individual components but rather sets of components — e.g. all velocity components together, and separate from the pressure in the Stokes system, without further splitting the velocities into their individual components. In either case, the correct concept to think in is that of a block. Since each component, if individually addressable, is also a block, thinking in terms of blocks is most frequently the better strategy.

For a given finite element, the number of components can be queried using the FiniteElementData::n_components() function, and you can find out which vector components are nonzero for a given finite element shape function using FiniteElement::get_nonzero_components(). The values and gradients of individual components of a shape function (if the element is primitive) can be queried using the FiniteElement::shape_value_component() and FiniteElement::shape_grad_component() functions on the reference cell. The FEValues::shape_value_component() and FEValues::shape_grad_component() functions do the same on a real cell. See also the documentation of the FiniteElement and FEValues classes.

@@ -262,7 +262,7 @@

would result in a mask [true, true, false] in 2d. Of course, in 3d, the result would be [true, true, true, false].

Note
Just as one can think of composed elements as being made up of components or blocks, there are component masks (represented by the ComponentMask class) and block masks (represented by the BlockMask class). The FiniteElement class has functions that convert between the two kinds of objects.
-Not all component masks actually make sense. For example, if you have a FE_RaviartThomas object in 2d, then it doesn't make any sense to have a component mask of the form [true, false] because you try to select individual vector components of a finite element where each shape function has both $x$ and $y$ velocities. In essence, while you can of course create such a component mask, there is nothing you can do with it.
+Not all component masks actually make sense. For example, if you have a FE_RaviartThomas object in 2d, then it doesn't make any sense to have a component mask of the form [true, false] because you try to select individual vector components of a finite element where each shape function has both $x$ and $y$ velocities. In essence, while you can of course create such a component mask, there is nothing you can do with it.
Compressing distributed vectors and matrices

For parallel computations, deal.II uses the vector and matrix classes defined in the PETScWrappers and TrilinosWrappers namespaces. When running programs in parallel using MPI, these classes only store a certain number of rows or elements on the current processor, whereas the rest of the vector or matrix is stored on the other processors that belong to our MPI universe. This presents a certain problem when you assemble linear systems: we add elements to the matrix and right hand side vectors that may or may not be stored locally. Sometimes, we may also want to just set an element, not add to it.

@@ -304,9 +304,9 @@

Degree of freedom
-

The term "degree of freedom" (often abbreviated as "DoF") is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf{x}) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf{x})$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problem is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in
-   V_h$). In other words, all we say here that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf{x})$ we have used above in the expansion of $u_h(\mathbf
-   x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh. Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher elements one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler. The process of enumerating degrees of freedom is referred to as "distributing DoFs" in deal.II.

+

The term "degree of freedom" (often abbreviated as "DoF") is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf{x}) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf{x})$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problem is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in
+   V_h$). In other words, all we say here that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf{x})$ we have used above in the expansion of $u_h(\mathbf
+   x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh. Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher elements one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler. The process of enumerating degrees of freedom is referred to as "distributing DoFs" in deal.II.

Direction flags
@@ -327,7 +327,7 @@
Distorted cells

A distorted cell is a cell for which the mapping from the reference cell to real cell has a Jacobian whose determinant is non-positive somewhere in the cell. Typically, we only check the sign of this determinant at the vertices of the cell. The function GeometryInfo::alternating_form_at_vertices computes these determinants at the vertices.

-

By way of example, if all of the determinants are of roughly equal value and on the order of $h^\text{dim}$ then the cell is well-shaped. For example, a square cell or face has determinants equal to $h^\text{dim}$ whereas a strongly sheared parallelogram has a determinant much smaller. Similarly, a cell with very unequal edge lengths will have widely varying determinants. Conversely, a pinched cell in which the location of two or more vertices is collapsed to a single point has a zero determinant at this location. Finally, an inverted or twisted cell in which the location of two vertices is out of order will have negative determinants.

+

By way of example, if all of the determinants are of roughly equal value and on the order of $h^\text{dim}$ then the cell is well-shaped. For example, a square cell or face has determinants equal to $h^\text{dim}$ whereas a strongly sheared parallelogram has a determinant much smaller. Similarly, a cell with very unequal edge lengths will have widely varying determinants. Conversely, a pinched cell in which the location of two or more vertices is collapsed to a single point has a zero determinant at this location. Finally, an inverted or twisted cell in which the location of two vertices is out of order will have negative determinants.

The following two images show a well-formed, a pinched, and a twisted cell for both 2d and 3d:

@@ -366,19 +366,19 @@

Generalized support points
-

"Generalized support points" are, as the name suggests, a generalization of support points. The latter are used to describe that a finite element simply interpolates values at individual points (the "support points"). If we call these points $\hat{\mathbf{x}}_i$ (where the hat indicates that these points are defined on the reference cell $\hat{K}$), then one typically defines shape functions $\varphi_j(\mathbf{x})$ in such a way that the nodal functionals $\Psi_i[\cdot]$ simply evaluate the function at the support point, i.e., that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)$, and the basis is chosen so that $\Psi_i[\varphi_j]=\delta_{ij}$ where $\delta_{ij}$ is the Kronecker delta function. This leads to the common Lagrange elements.

-

(In the vector valued case, the only other piece of information besides the support points $\hat{\mathbf{x}}_i$ that one needs to provide is the vector component $c(i)$ the $i$th node functional corresponds, so that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)_{c(i)}$.)

-

On the other hand, there are other kinds of elements that are not defined this way. For example, for the lowest order Raviart-Thomas element (see the FE_RaviartThomas class), the node functional evaluates not individual components of a vector-valued finite element function with dim components, but the normal component of this vector: $\Psi_i[\varphi]
+<dd><p class="Generalized support points" are, as the name suggests, a generalization of support points. The latter are used to describe that a finite element simply interpolates values at individual points (the "support points"). If we call these points $\hat{\mathbf{x}}_i$ (where the hat indicates that these points are defined on the reference cell $\hat{K}$), then one typically defines shape functions $\varphi_j(\mathbf{x})$ in such a way that the nodal functionals $\Psi_i[\cdot]$ simply evaluate the function at the support point, i.e., that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)$, and the basis is chosen so that $\Psi_i[\varphi_j]=\delta_{ij}$ where $\delta_{ij}$ is the Kronecker delta function. This leads to the common Lagrange elements.

+

(In the vector valued case, the only other piece of information besides the support points $\hat{\mathbf{x}}_i$ that one needs to provide is the vector component $c(i)$ the $i$th node functional corresponds, so that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)_{c(i)}$.)

+

On the other hand, there are other kinds of elements that are not defined this way. For example, for the lowest order Raviart-Thomas element (see the FE_RaviartThomas class), the node functional evaluates not individual components of a vector-valued finite element function with dim components, but the normal component of this vector: $\Psi_i[\varphi]
     =
     \varphi(\hat{\mathbf{x}}_i) \cdot \mathbf{n}_i
-   $, where the $\mathbf{n}_i$ are the normal vectors to the face of the cell on which $\hat{\mathbf{x}}_i$ is located. In other words, the node functional is a linear combination of the components of $\varphi$ when evaluated at $\hat{\mathbf{x}}_i$. Similar things happen for the BDM, ABF, and Nedelec elements (see the FE_BDM, FE_ABF, FE_Nedelec classes).

-

In these cases, the element does not have support points because it is not purely interpolatory; however, some kind of interpolation is still involved when defining shape functions as the node functionals still require point evaluations at special points $\hat{\mathbf{x}}_i$. In these cases, we call the points generalized support points.

-

Finally, there are elements that still do not fit into this scheme. For example, some hierarchical basis functions (see, for example the FE_Q_Hierarchical element) are defined so that the node functionals are moments of finite element functions, $\Psi_i[\varphi]
+   $, where the $\mathbf{n}_i$ are the normal vectors to the face of the cell on which $\hat{\mathbf{x}}_i$ is located. In other words, the node functional is a linear combination of the components of $\varphi$ when evaluated at $\hat{\mathbf{x}}_i$. Similar things happen for the BDM, ABF, and Nedelec elements (see the FE_BDM, FE_ABF, FE_Nedelec classes).

+

In these cases, the element does not have support points because it is not purely interpolatory; however, some kind of interpolation is still involved when defining shape functions as the node functionals still require point evaluations at special points $\hat{\mathbf{x}}_i$. In these cases, we call the points generalized support points.

+

Finally, there are elements that still do not fit into this scheme. For example, some hierarchical basis functions (see, for example the FE_Q_Hierarchical element) are defined so that the node functionals are moments of finite element functions, $\Psi_i[\varphi]
     =
     \int_{\hat{K}} \varphi(\hat{\mathbf{x}})
     {\hat{x}_1}^{p_1(i)}
     {\hat{x}_2}^{p_2(i)}
-   $ in 2d, and similarly for 3d, where the $p_d(i)$ are the order of the moment described by shape function $i$. Some other elements use moments over edges or faces. In all of these cases, node functionals are not defined through interpolation at all, and these elements then have neither support points, nor generalized support points.

+ $" src="form_124.png"/> in 2d, and similarly for 3d, where the $p_d(i)$ are the order of the moment described by shape function $i$. Some other elements use moments over edges or faces. In all of these cases, node functionals are not defined through interpolation at all, and these elements then have neither support points, nor generalized support points.

geometry paper
@@ -453,47 +453,47 @@
Lumped mass matrix

The mass matrix is a matrix of the form

-\begin{align*}
+<picture><source srcset=\begin{align*}
        M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
-     \end{align*} + \end{align*}" src="form_126.png"/>

It frequently appears in the solution of time dependent problems where, if one uses an explicit time stepping method, it then leads to the need to solve problems of the form

-\begin{align*}
+<picture><source srcset=\begin{align*}
        MU^n = MU^{n-1} + k_n BU^{n-1},
-     \end{align*} + \end{align*}" src="form_127.png"/>

-

in time step $n$, where $U^n$ is the solution to be computed, $U^{n-1}$ is the known solution from the first time step, and $B$ is a matrix related to the differential operator in the PDE. $k_n$ is the size of the time step. A similar linear system of equations also arises out of the discretization of second-order differential equations.

-

The presence of the matrix $M$ on the left side is a nuisance because, even though we have used an explicit time stepping method, we still have to solve a linear system in each time step. It would be much preferable if the matrix were diagonal. "Lumping" the mass matrix is a strategy to replace $M$ by a matrix $M_\text{diagonal}$ that actually is diagonal, yet does not destroy the accuracy of the resulting solution.

-

Historically, mass lumping was performed by adding the elements of a row together and setting the diagonal entries of $M_\text{diagonal}$ to that sum. This works for $Q_1$ and $P_1$ elements, for example, and can be understood mechanically by replacing the continuous medium we are discretizating by one where the continuous mass distribution is replaced by one where (finite amounts of) mass are located only at the nodes. That is, we are "lumping together" the mass of an element at its vertices, thus giving rise to the name "lumped mass matrix". A more mathematical perspective is to compute the integral above for $M_{ij}$ via special quadrature rules; in particular, we replace the computation of

-\begin{align*}
+<p> in time step <picture><source srcset=$n$, where $U^n$ is the solution to be computed, $U^{n-1}$ is the known solution from the first time step, and $B$ is a matrix related to the differential operator in the PDE. $k_n$ is the size of the time step. A similar linear system of equations also arises out of the discretization of second-order differential equations.

+

The presence of the matrix $M$ on the left side is a nuisance because, even though we have used an explicit time stepping method, we still have to solve a linear system in each time step. It would be much preferable if the matrix were diagonal. "Lumping" the mass matrix is a strategy to replace $M$ by a matrix $M_\text{diagonal}$ that actually is diagonal, yet does not destroy the accuracy of the resulting solution.

+

Historically, mass lumping was performed by adding the elements of a row together and setting the diagonal entries of $M_\text{diagonal}$ to that sum. This works for $Q_1$ and $P_1$ elements, for example, and can be understood mechanically by replacing the continuous medium we are discretizating by one where the continuous mass distribution is replaced by one where (finite amounts of) mass are located only at the nodes. That is, we are "lumping together" the mass of an element at its vertices, thus giving rise to the name "lumped mass matrix". A more mathematical perspective is to compute the integral above for $M_{ij}$ via special quadrature rules; in particular, we replace the computation of

+\begin{align*}
        M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx
               = \sum_K \int_K \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
-     \end{align*} + \end{align*}" src="form_134.png"/>

by quadrature

-\begin{align*}
+<picture><source srcset=\begin{align*}
        (M_{\text{diagonal}})_{ij} = \sum_K \sum_q \varphi_i(\mathbf x_q^K) \varphi_j(\mathbf x_q^K)
        |K| w_q,
-     \end{align*} + \end{align*}" src="form_135.png"/>

where we choose the quadrature points as the nodes at which the shape functions are defined. If we order the quadrature points in the same way as the shape functions, then

-\begin{align*}
+<picture><source srcset=\begin{align*}
        \varphi_i(\mathbf x_q^K) = \delta_{iq},
-     \end{align*} + \end{align*}" src="form_136.png"/>

and consequently

-\begin{align*}
+<picture><source srcset=\begin{align*}
        (M_{\text{diagonal}})_{ij} = \delta_{ij} \sum_{K, \text{supp}\varphi_i \cap K \neq \emptyset} |K| w_i,
-     \end{align*} + \end{align*}" src="form_137.png"/>

-

where the sum extends over those cells on which $\varphi_i$ is nonzero. The so-computed mass matrix is therefore diagonal.

-

Whether or not this particular choice of quadrature formula is sufficient to retain the convergence rate of the discretization is a separate question. For the usual $Q_k$ finite elements (implemented by FE_Q and FE_DGQ), the appropriate quadrature formulas are of QGaussLobatto type. Mass lumping can also be done with FE_SimplexP_Bubbles, for example, if appropriate quadrature rules are chosen.

+

where the sum extends over those cells on which $\varphi_i$ is nonzero. The so-computed mass matrix is therefore diagonal.

+

Whether or not this particular choice of quadrature formula is sufficient to retain the convergence rate of the discretization is a separate question. For the usual $Q_k$ finite elements (implemented by FE_Q and FE_DGQ), the appropriate quadrature formulas are of QGaussLobatto type. Mass lumping can also be done with FE_SimplexP_Bubbles, for example, if appropriate quadrature rules are chosen.

For an example of where lumped mass matrices play a role, see step-69.

Manifold indicator

Every object that makes up a Triangulation (cells, faces, edges, etc.), is associated with a unique number (of type types::manifold_id) that is used to identify which manifold object is responsible to generate new points when the mesh is refined.

-

By default, all manifold indicators of a mesh are set to numbers::flat_manifold_id. A typical piece of code that sets the manifold indicator on a object to something else would look like this, here setting the manifold indicator to 42 for all cells whose center has an $x$ component less than zero:

+

By default, all manifold indicators of a mesh are set to numbers::flat_manifold_id. A typical piece of code that sets the manifold indicator on a object to something else would look like this, here setting the manifold indicator to 42 for all cells whose center has an $x$ component less than zero:

for (auto &cell : triangulation.active_cell_iterators())
if (cell->center()[0] < 0)
cell->set_manifold_id (42);
@@ -504,41 +504,41 @@
Mass matrix

The "mass matrix" is a matrix of the form

-\begin{align*}
+<picture><source srcset=\begin{align*}
        M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
-     \end{align*} + \end{align*}" src="form_126.png"/>

-

possibly with a coefficient inside the integral, and where $\varphi_i(\mathbf x)$ are the shape functions of a finite element. The origin of the term refers to the fact that in structural mechanics (where the finite element method originated), one often starts from the elastodynamics (wave) equation

-\begin{align*}
+<p> possibly with a coefficient inside the integral, and where <picture><source srcset=$\varphi_i(\mathbf x)$ are the shape functions of a finite element. The origin of the term refers to the fact that in structural mechanics (where the finite element method originated), one often starts from the elastodynamics (wave) equation

+\begin{align*}
        \rho \frac{\partial^2 u}{\partial t^2}
        -\nabla \cdot C \nabla u = f.
-     \end{align*} /usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html 2024-03-17 21:57:16.899070340 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html 2024-03-17 21:57:16.903070365 +0000 @@ -340,7 +340,7 @@

-step-47

Solving the fourth-order biharmonic equation using the $C^0$ Interior Penalty (C0IP) method.
+step-47

Solving the fourth-order biharmonic equation using the $C^0$ Interior Penalty (C0IP) method.
Keywords: FEInterfaceValues

/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex differs (LaTeX 2e document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex 2023-10-24 08:03:04.000000000 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex 2023-10-24 08:03:04.000000000 +0000 @@ -31,15 +31,6 @@ \pagestyle{empty} \begin{document} -$O(\text{dim}^3)$ -\pagebreak - -$u = u - P^{-1} (A u - v)$ -\pagebreak - -$u = u - P^{-T} (A u - v)$ -\pagebreak - $F(u,\nabla u)=0$ \pagebreak @@ -113,104 +104,113 @@ $\dfrac{d f_{i-1}}{d f_{i}}$ \pagebreak -$u|_{\partial\Omega}=g$ +$f(x,y) = [2x+1]^{y}$ \pagebreak -$x_{12}=42$ +$x$ \pagebreak -$g(\mathbf x)$ +$y$ \pagebreak -$u(\mathbf x)$ +$\dfrac{d f(x,y)}{d x} = 2y[2x+1]^{y-1}$ \pagebreak -$\mathbf n \cdot - \mathbf u=0$ +$\dfrac{d f(x,y)}{d y} = [2x+1]^{y} \ln(2x+1)$ \pagebreak -$\mathbf{n}\times\mathbf{u}= - \mathbf{n}\times\mathbf{f}$ +$\dfrac{d f(x, y)}{d x}$ \pagebreak -$\frac 1{\sqrt{14}} - (1,2,3)^T$ +$x=1, y=2.5$ \pagebreak -$x$ +$x=3.25, y=-6$ \pagebreak -$y$ +$g(x) = \dfrac{d f(x, y)}{d x} \vert_{y=1}$ \pagebreak -$z$ +$g(x)$ \pagebreak -$\frac 1{\sqrt{14}} (x_{12}+2x_{28}+3x_{40})=0$ +$y \rightarrow y(x) := 2x$ \pagebreak -$f(x,y) = [2x+1]^{y}$ +$\dfrac{d f(x, y)}{d x} \rightarrow \dfrac{\partial f(x, y(x))}{\partial x} = 2y(x) [2x+1]^{y(x)-1} = 4x[2x+1]^{2x-1}$ \pagebreak -$x_{12}$ +$\dfrac{d f(x, y)}{d y} \rightarrow \dfrac{\partial f(x, y(x))}{\partial y} = [2x+1]^{y(x)} \ln(2x+1) = [2x+1]^{2x} \ln(2x+1)$ \pagebreak -$\dfrac{d f(x,y)}{d x} = 2y[2x+1]^{y-1}$ +$\dfrac{d f(x, y(x))}{d x}$ \pagebreak -$x_{28}$ +$\dfrac{d f(x, y(x))}{d y}$ \pagebreak -$x_{40}$ +$O(\text{dim}^3)$ \pagebreak -$\dfrac{d f(x,y)}{d y} = [2x+1]^{y} \ln(2x+1)$ +$u = u - P^{-1} (A u - v)$ \pagebreak -$x_{12}= - \frac 12 (x_{28}+x_{40})$ +$u = u - P^{-T} (A u - v)$ \pagebreak -$x_2=\frac 12 x_0 + \frac 12 x_1$ +$u|_{\partial\Omega}=g$ \pagebreak -$x_4=\frac 14 x_0 + \frac 34 x_1$ 
+$x_{12}=42$ \pagebreak -$\dfrac{d f(x, y)}{d x}$ +$g(\mathbf x)$ \pagebreak -$x=1, y=2.5$ +$u(\mathbf x)$ \pagebreak -$x_3=x_1$ +$\mathbf n \cdot + \mathbf u=0$ \pagebreak -$x=3.25, y=-6$ +$\mathbf{n}\times\mathbf{u}= + \mathbf{n}\times\mathbf{f}$ \pagebreak -$g(x) = \dfrac{d f(x, y)}{d x} \vert_{y=1}$ +$\frac 1{\sqrt{14}} + (1,2,3)^T$ \pagebreak -$g(x)$ +$z$ \pagebreak -$x_{i_1} = \sum_{j=2}^M a_{i_j} x_{i_j} + b_i$ +$\frac 1{\sqrt{14}} (x_{12}+2x_{28}+3x_{40})=0$ \pagebreak -$y \rightarrow y(x) := 2x$ +$x_{12}$ \pagebreak -$\dfrac{d f(x, y)}{d x} \rightarrow \dfrac{\partial f(x, y(x))}{\partial x} = 2y(x) [2x+1]^{y(x)-1} = 4x[2x+1]^{2x-1}$ +$x_{28}$ \pagebreak -$\dfrac{d f(x, y)}{d y} \rightarrow \dfrac{\partial f(x, y(x))}{\partial y} = [2x+1]^{y(x)} \ln(2x+1) = [2x+1]^{2x} \ln(2x+1)$ +$x_{40}$ \pagebreak -$\dfrac{d f(x, y(x))}{d x}$ +$x_{12}= + \frac 12 (x_{28}+x_{40})$ \pagebreak -$\dfrac{d f(x, y(x))}{d y}$ +$x_2=\frac 12 x_0 + \frac 12 x_1$ +\pagebreak + +$x_4=\frac 14 x_0 + \frac 34 x_1$ +\pagebreak + +$x_3=x_1$ +\pagebreak + +$x_{i_1} = \sum_{j=2}^M a_{i_j} x_{i_j} + b_i$ \pagebreak $x_{13}=42$ @@ -337,30 +337,6 @@ $J_K$ \pagebreak -$Q_2$ -\pagebreak - -$p$ -\pagebreak - -$(A+k\,B)\,C$ -\pagebreak - -$B$ -\pagebreak - -$b-Ax$ -\pagebreak - -$V_h$ -\pagebreak - -$u_h(\mathbf x)= \sum_j U_j \varphi_i(\mathbf x)$ -\pagebreak - -$U_j$ -\pagebreak - \begin{eqnarray*} \left(\begin{array}{cc} M & B^T \\ B & 0 @@ -381,6 +357,9 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex differs (LaTeX 2e document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex 2023-10-24 08:03:04.000000000 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex 2023-10-24 08:03:04.000000000 +0000 @@ -33,15 +33,6 @@ \pagestyle{empty} \begin{document} -$O(\text{dim}^3)$ -\pagebreak - -$u = u - P^{-1} (A u - v)$ -\pagebreak - -$u = u - P^{-T} (A u - v)$ -\pagebreak - $F(u,\nabla 
u)=0$ \pagebreak @@ -115,104 +106,113 @@ $\dfrac{d f_{i-1}}{d f_{i}}$ \pagebreak -$u|_{\partial\Omega}=g$ +$f(x,y) = [2x+1]^{y}$ \pagebreak -$x_{12}=42$ +$x$ \pagebreak -$g(\mathbf x)$ +$y$ \pagebreak -$u(\mathbf x)$ +$\dfrac{d f(x,y)}{d x} = 2y[2x+1]^{y-1}$ \pagebreak -$\mathbf n \cdot - \mathbf u=0$ +$\dfrac{d f(x,y)}{d y} = [2x+1]^{y} \ln(2x+1)$ \pagebreak -$\mathbf{n}\times\mathbf{u}= - \mathbf{n}\times\mathbf{f}$ +$\dfrac{d f(x, y)}{d x}$ \pagebreak -$\frac 1{\sqrt{14}} - (1,2,3)^T$ +$x=1, y=2.5$ \pagebreak -$x$ +$x=3.25, y=-6$ \pagebreak -$y$ +$g(x) = \dfrac{d f(x, y)}{d x} \vert_{y=1}$ \pagebreak -$z$ +$g(x)$ \pagebreak -$\frac 1{\sqrt{14}} (x_{12}+2x_{28}+3x_{40})=0$ +$y \rightarrow y(x) := 2x$ \pagebreak -$f(x,y) = [2x+1]^{y}$ +$\dfrac{d f(x, y)}{d x} \rightarrow \dfrac{\partial f(x, y(x))}{\partial x} = 2y(x) [2x+1]^{y(x)-1} = 4x[2x+1]^{2x-1}$ \pagebreak -$x_{12}$ +$\dfrac{d f(x, y)}{d y} \rightarrow \dfrac{\partial f(x, y(x))}{\partial y} = [2x+1]^{y(x)} \ln(2x+1) = [2x+1]^{2x} \ln(2x+1)$ \pagebreak -$\dfrac{d f(x,y)}{d x} = 2y[2x+1]^{y-1}$ +$\dfrac{d f(x, y(x))}{d x}$ \pagebreak -$x_{28}$ +$\dfrac{d f(x, y(x))}{d y}$ \pagebreak -$x_{40}$ +$O(\text{dim}^3)$ \pagebreak -$\dfrac{d f(x,y)}{d y} = [2x+1]^{y} \ln(2x+1)$ +$u = u - P^{-1} (A u - v)$ \pagebreak -$x_{12}= - \frac 12 (x_{28}+x_{40})$ +$u = u - P^{-T} (A u - v)$ \pagebreak -$x_2=\frac 12 x_0 + \frac 12 x_1$ +$u|_{\partial\Omega}=g$ \pagebreak -$x_4=\frac 14 x_0 + \frac 34 x_1$ +$x_{12}=42$ \pagebreak -$\dfrac{d f(x, y)}{d x}$ +$g(\mathbf x)$ \pagebreak -$x=1, y=2.5$ +$u(\mathbf x)$ \pagebreak -$x_3=x_1$ +$\mathbf n \cdot + \mathbf u=0$ \pagebreak -$x=3.25, y=-6$ +$\mathbf{n}\times\mathbf{u}= + \mathbf{n}\times\mathbf{f}$ \pagebreak -$g(x) = \dfrac{d f(x, y)}{d x} \vert_{y=1}$ +$\frac 1{\sqrt{14}} + (1,2,3)^T$ \pagebreak -$g(x)$ +$z$ \pagebreak -$x_{i_1} = \sum_{j=2}^M a_{i_j} x_{i_j} + b_i$ +$\frac 1{\sqrt{14}} (x_{12}+2x_{28}+3x_{40})=0$ \pagebreak -$y \rightarrow y(x) := 2x$ +$x_{12}$ \pagebreak 
-$\dfrac{d f(x, y)}{d x} \rightarrow \dfrac{\partial f(x, y(x))}{\partial x} = 2y(x) [2x+1]^{y(x)-1} = 4x[2x+1]^{2x-1}$ +$x_{28}$ \pagebreak -$\dfrac{d f(x, y)}{d y} \rightarrow \dfrac{\partial f(x, y(x))}{\partial y} = [2x+1]^{y(x)} \ln(2x+1) = [2x+1]^{2x} \ln(2x+1)$ +$x_{40}$ \pagebreak -$\dfrac{d f(x, y(x))}{d x}$ +$x_{12}= + \frac 12 (x_{28}+x_{40})$ \pagebreak -$\dfrac{d f(x, y(x))}{d y}$ +$x_2=\frac 12 x_0 + \frac 12 x_1$ +\pagebreak + +$x_4=\frac 14 x_0 + \frac 34 x_1$ +\pagebreak + +$x_3=x_1$ +\pagebreak + +$x_{i_1} = \sum_{j=2}^M a_{i_j} x_{i_j} + b_i$ \pagebreak $x_{13}=42$ @@ -339,30 +339,6 @@ $J_K$ \pagebreak -$Q_2$ -\pagebreak - -$p$ -\pagebreak - -$(A+k\,B)\,C$ -\pagebreak - -$B$ -\pagebreak - -$b-Ax$ -\pagebreak - -$V_h$ -\pagebreak - -$u_h(\mathbf x)= \sum_j U_j \varphi_i(\mathbf x)$ -\pagebreak - -$U_j$ -\pagebreak - \begin{eqnarray*} \left(\begin{array}{cc} M & B^T \\ B & 0 @@ -383,6 +359,9 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html 2024-03-17 21:57:17.367073231 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html 2024-03-17 21:57:17.375073281 +0000 @@ -695,7 +695,7 @@
  • -

    Improved: The FEValuesViews objects that one gets when writing things like fe_values[velocities] (see Handling vector valued problems) have become a lot smarter. They now compute a significant amount of data at creation time, rather than on the fly. This means that creating such objects becomes more expensive but using them is cheaper. To offset this cost, FEValuesBase objects now create all possible FEValuesViews objects at creation time, rather than whenever you do things like fe_values[velocities], and simply return a reference to a pre-generated object. This turns an $O(N)$ effort into an $O(1)$ effort, where $N$ is the number of cells.
    +

    Improved: The FEValuesViews objects that one gets when writing things like fe_values[velocities] (see Handling vector valued problems) have become a lot smarter. They now compute a significant amount of data at creation time, rather than on the fly. This means that creating such objects becomes more expensive but using them is cheaper. To offset this cost, FEValuesBase objects now create all possible FEValuesViews objects at creation time, rather than whenever you do things like fe_values[velocities], and simply return a reference to a pre-generated object. This turns an $O(N)$ effort into an $O(1)$ effort, where $N$ is the number of cells.
    (WB 2008/12/10)

    /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 2024-03-17 21:57:17.411073503 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 2024-03-17 21:57:17.419073553 +0000 @@ -501,7 +501,7 @@
  • -

    New: There are new functions FullMatrix::cholesky and FullMatrix::outer_product. FullMatrix::cholesky finds the Cholesky decomposition of a matrix in lower triangular form. FullMatrix::outer_product calculates *this $= VW^T$ where $V$ and $W$ are vectors.
    +

    New: There are new functions FullMatrix::cholesky and FullMatrix::outer_product. FullMatrix::cholesky finds the Cholesky decomposition of a matrix in lower triangular form. FullMatrix::outer_product calculates *this $= VW^T$ where $V$ and $W$ are vectors.
    (Jean Marie Linhart 2009/07/27)

    /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html 2024-03-17 21:57:17.451073751 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html 2024-03-17 21:57:17.459073799 +0000 @@ -839,7 +839,7 @@

  • -

    New: There is now a new class Functions::InterpolatedTensorProductGridData that can be used to (bi-/tri-)linearly interpolate data given on a tensor product mesh of $x$ (and $y$ and $z$) values, for example to evaluate experimentally determined coefficients, or to assess the accuracy of a solution by comparing with a solution generated by a different code and written in gridded data. There is also a new class Functions::InterpolatedUniformGridData that can perform the same task more efficiently if the data is stored on meshes that are uniform in each coordinate direction.
    +

    New: There is now a new class Functions::InterpolatedTensorProductGridData that can be used to (bi-/tri-)linearly interpolate data given on a tensor product mesh of $x$ (and $y$ and $z$) values, for example to evaluate experimentally determined coefficients, or to assess the accuracy of a solution by comparing with a solution generated by a different code and written in gridded data. There is also a new class Functions::InterpolatedUniformGridData that can perform the same task more efficiently if the data is stored on meshes that are uniform in each coordinate direction.
    (Wolfgang Bangerth, 2013/12/20)

  • /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_4_2_and_8_5_0.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_4_2_and_8_5_0.html 2024-03-17 21:57:17.499074047 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_4_2_and_8_5_0.html 2024-03-17 21:57:17.507074096 +0000 @@ -518,7 +518,7 @@

  • -

    Fixed: The FE_ABF class reported the maximal polynomial degree (via FiniteElement::degree) for elements of order $r$ as $r+1$, but this is wrong. It should be $r+2$ (see Section 5 of the original paper of Arnold, Boffi, and Falk). This is now fixed.
    +

    Fixed: The FE_ABF class reported the maximal polynomial degree (via FiniteElement::degree) for elements of order $r$ as $r+1$, but this is wrong. It should be $r+2$ (see Section 5 of the original paper of Arnold, Boffi, and Falk). This is now fixed.
    (Wolfgang Bangerth, 2017/01/13)

  • /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 2024-03-17 21:57:17.555074392 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 2024-03-17 21:57:17.563074442 +0000 @@ -608,7 +608,7 @@

  • -

    Improved: GridGenerator::hyper_shell() in 3d now supports more n_cells options. While previously only 6, 12, or 96 cells were possible, the function now supports any number of the kind $6 \times 2^m$ with $m$ a non-negative integer. The new cases $m=2,3$ and $m\geq 5$ correspond to refinement in the azimuthal direction of the 6 or 12 cell case with a single mesh layer in radial direction, and are intended for shells that are thin and should be given more resolution in azimuthal direction.
    +

    Improved: GridGenerator::hyper_shell() in 3d now supports more n_cells options. While previously only 6, 12, or 96 cells were possible, the function now supports any number of the kind $6 \times 2^m$ with $m$ a non-negative integer. The new cases $m=2,3$ and $m\geq 5$ correspond to refinement in the azimuthal direction of the 6 or 12 cell case with a single mesh layer in radial direction, and are intended for shells that are thin and should be given more resolution in azimuthal direction.
    (Martin Kronbichler, 2020/04/07)

  • @@ -1562,7 +1562,7 @@

  • -

    Improved: The additional roots of the HermiteLikeInterpolation with degree $p$ greater than four have been switched to the roots of the Jacobi polynomial $P^{(4,4)}_{p-3}$, making the interior bubble functions $L_2$ orthogonal and improving the conditioning of interpolation slightly.
    +

    Improved: The additional roots of the HermiteLikeInterpolation with degree $p$ greater than four have been switched to the roots of the Jacobi polynomial $P^{(4,4)}_{p-3}$, making the interior bubble functions $L_2$ orthogonal and improving the conditioning of interpolation slightly.
    (Martin Kronbichler, 2019/07/12)

  • /usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 2024-03-17 21:57:17.639074911 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 2024-03-17 21:57:17.647074961 +0000 @@ -358,9 +358,9 @@

    The algorithms used in the implementation of this class are described in some detail in the hp-paper. There is also a significant amount of documentation on how to use this class in the Constraints on degrees of freedom module.

    Description of constraints

    Each "line" in objects of this class corresponds to one constrained degree of freedom, with the number of the line being i, entered by using add_line() or add_lines(). The entries in this line are pairs of the form (j,aij), which are added by add_entry() or add_entries(). The organization is essentially a SparsityPattern, but with only a few lines containing nonzero elements, and therefore no data wasted on the others. For each line, which has been added by the mechanism above, an elimination of the constrained degree of freedom of the form

    -\[
+<picture><source srcset=\[
  x_i = \sum_j a_{ij} x_j + b_i
-\] +\]" src="form_1577.png"/>

    is performed, where bi is optional and set by set_inhomogeneity(). Thus, if a constraint is formulated for instance as a zero mean value of several degrees of freedom, one of the degrees has to be chosen to be eliminated.

    Note that the constraints are linear in the xi, and that there might be a constant (non-homogeneous) term in the constraint. This is exactly the form we need for hanging node constraints, where we need to constrain one degree of freedom in terms of others. There are other conditions of this form possible, for example for implementing mean value conditions as is done in the step-11 tutorial program. The name of the class stems from the fact that these constraints can be represented in matrix form as X x = b, and this object then describes the matrix X and the vector b. The most frequent way to create/fill objects of this type is using the DoFTools::make_hanging_node_constraints() function. The use of these objects is first explained in step-6.

    @@ -914,13 +914,13 @@
    -

    Add an entry to a given line. In other words, this function adds a term $a_{ij} x_j$ to the constraints for the $i$th degree of freedom.

    +

    Add an entry to a given line. In other words, this function adds a term $a_{ij} x_j$ to the constraints for the $i$th degree of freedom.

    If an entry with the same indices as the one this function call denotes already exists, then this function simply returns provided that the value of the entry is the same. Thus, it does no harm to enter a constraint twice.

    Parameters
    - - - + + +
    [in]constrained_dof_indexThe index $i$ of the degree of freedom that is being constrained.
    [in]columnThe index $j$ of the degree of freedom being entered into the constraint for degree of freedom $i$.
    [in]weightThe factor $a_{ij}$ that multiplies $x_j$.
    [in]constrained_dof_indexThe index $i$ of the degree of freedom that is being constrained.
    [in]columnThe index $j$ of the degree of freedom being entered into the constraint for degree of freedom $i$.
    [in]weightThe factor $a_{ij}$ that multiplies $x_j$.
    @@ -981,11 +981,11 @@
    -

    Set an inhomogeneity to the constraint for a degree of freedom. In other words, it adds a constant $b_i$ to the constraint for degree of freedom $i$. For this to work, you need to call add_line() first for the given degree of freedom.

    +

    Set an inhomogeneity to the constraint for a degree of freedom. In other words, it adds a constant $b_i$ to the constraint for degree of freedom $i$. For this to work, you need to call add_line() first for the given degree of freedom.

    Parameters
    - - + +
    [in]constrained_dof_indexThe index $i$ of the degree of freedom that is being constrained.
    [in]valueThe right hand side value $b_i$ for the constraint on the degree of freedom $i$.
    [in]constrained_dof_indexThe index $i$ of the degree of freedom that is being constrained.
    [in]valueThe right hand side value $b_i$ for the constraint on the degree of freedom $i$.
    @@ -1013,9 +1013,9 @@

    Close the filling of entries. Since the lines of a matrix of this type are usually filled in an arbitrary order and since we do not want to use associative constrainers to store the lines, we need to sort the lines and within the lines the columns before usage of the matrix. This is done through this function.

    Also, zero entries are discarded, since they are not needed.

    After closing, no more entries are accepted. If the object was already closed, then this function returns immediately.

    -

    This function also resolves chains of constraints. For example, degree of freedom 13 may be constrained to $u_{13} = \frac{u_3}{2} + \frac{u_7}{2}$ while degree of freedom 7 is itself constrained as $u_{7} = \frac{u_2}{2}
-+ \frac{u_4}{2}$. Then, the resolution will be that $u_{13} =
-\frac{u_3}{2} + \frac{u_2}{4} + \frac{u_4}{4}$. Note, however, that cycles in this graph of constraints are not allowed, i.e., for example $u_4$ may not itself be constrained, directly or indirectly, to $u_{13}$ again.

    +

    This function also resolves chains of constraints. For example, degree of freedom 13 may be constrained to $u_{13} = \frac{u_3}{2} + \frac{u_7}{2}$ while degree of freedom 7 is itself constrained as $u_{7} = \frac{u_2}{2}
++ \frac{u_4}{2}$. Then, the resolution will be that $u_{13} =
+\frac{u_3}{2} + \frac{u_2}{4} + \frac{u_4}{4}$. Note, however, that cycles in this graph of constraints are not allowed, i.e., for example $u_4$ may not itself be constrained, directly or indirectly, to $u_{13}$ again.

    @@ -1445,9 +1445,9 @@

    Print the constraints represented by the current object to the given stream.

    For each constraint of the form

    -\[
+<picture><source srcset=\[
  x_{42} = 0.5 x_2 + 0.25 x_{14} + 2.75
-\] +\]" src="form_1586.png"/>

    this function will write a sequence of lines that look like this:

    42 2 : 0.5
    42 14 : 0.25
    @@ -2025,7 +2025,7 @@

    This function takes a matrix of local contributions (local_matrix) corresponding to the degrees of freedom indices given in local_dof_indices and distributes them to the global matrix. In other words, this function implements a scatter operation. In most cases, these local contributions will be the result of an integration over a cell or face of a cell. However, as long as local_matrix and local_dof_indices have the same number of elements, this function is happy with whatever it is given.

    In contrast to the similar function in the DoFAccessor class, this function also takes care of constraints, i.e. if one of the elements of local_dof_indices belongs to a constrained node, then rather than writing the corresponding element of local_matrix into global_matrix, the element is distributed to the entries in the global matrix to which this particular degree of freedom is constrained.

    -

    With this scheme, we never write into rows or columns of constrained degrees of freedom. In order to make sure that the resulting matrix can still be inverted, we need to do something with the diagonal elements corresponding to constrained nodes. Thus, if a degree of freedom in local_dof_indices is constrained, we distribute the corresponding entries in the matrix, but also add the absolute value of the diagonal entry of the local matrix to the corresponding entry in the global matrix. Assuming the discretized operator is positive definite, this guarantees that the diagonal entry is always non-zero, positive, and of the same order of magnitude as the other entries of the matrix. On the other hand, when solving a source problem $Au=f$ the exact value of the diagonal element is not important, since the value of the respective degree of freedom will be overwritten by the distribute() call later on anyway.

    +

    With this scheme, we never write into rows or columns of constrained degrees of freedom. In order to make sure that the resulting matrix can still be inverted, we need to do something with the diagonal elements corresponding to constrained nodes. Thus, if a degree of freedom in local_dof_indices is constrained, we distribute the corresponding entries in the matrix, but also add the absolute value of the diagonal entry of the local matrix to the corresponding entry in the global matrix. Assuming the discretized operator is positive definite, this guarantees that the diagonal entry is always non-zero, positive, and of the same order of magnitude as the other entries of the matrix. On the other hand, when solving a source problem $Au=f$ the exact value of the diagonal element is not important, since the value of the respective degree of freedom will be overwritten by the distribute() call later on anyway.

    Note
    The procedure described above adds an unforeseeable number of artificial eigenvalues to the spectrum of the matrix. Therefore, it is recommended to use the equivalent function with two local index vectors in such a case.

    By using this function to distribute local contributions to the global object, one saves the call to the condense function after the vectors and matrices are fully assembled.

    Note
    This function in itself is thread-safe, i.e., it works properly also when several threads call it simultaneously. However, the function call is only thread-safe if the underlying global matrix allows for simultaneous access and the access is not to rows with the same global index at the same time. This needs to be made sure from the caller's site. There is no locking mechanism inside this method to prevent data races.
    @@ -2067,7 +2067,7 @@

    This function does almost the same as the function above but can treat general rectangular matrices. The main difference to achieve this is that the diagonal entries in constrained rows are left untouched instead of being filled with arbitrary values.

    -

    Since the diagonal entries corresponding to eliminated degrees of freedom are not set, the result may have a zero eigenvalue, if applied to a square matrix. This has to be considered when solving the resulting problems. For solving a source problem $Au=f$, it is possible to set the diagonal entry after building the matrix by a piece of code of the form

    +

    Since the diagonal entries corresponding to eliminated degrees of freedom are not set, the result may have a zero eigenvalue, if applied to a square matrix. This has to be considered when solving the resulting problems. For solving a source problem $Au=f$, it is possible to set the diagonal entry after building the matrix by a piece of code of the form

    for (unsigned int i=0;i<matrix.m();++i)
    if (constraints.is_constrained(i))
    matrix.diag_element(i) = 1.;
    @@ -2356,7 +2356,7 @@
    -

    Given a vector, set all constrained degrees of freedom to values so that the constraints are satisfied. For example, if the current object stores the constraint $x_3=\frac 12 x_1 + \frac 12 x_2$, then this function will read the values of $x_1$ and $x_2$ from the given vector and set the element $x_3$ according to this constraints. Similarly, if the current object stores the constraint $x_{42}=208$, then this function will set the 42nd element of the given vector to 208.

    +

    Given a vector, set all constrained degrees of freedom to values so that the constraints are satisfied. For example, if the current object stores the constraint $x_3=\frac 12 x_1 + \frac 12 x_2$, then this function will read the values of $x_1$ and $x_2$ from the given vector and set the element $x_3$ according to this constraints. Similarly, if the current object stores the constraint $x_{42}=208$, then this function will set the 42nd element of the given vector to 208.

    Note
    If this function is called with a parallel vector vec, then the vector must not contain ghost elements.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 2024-03-17 21:57:17.699075283 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 2024-03-17 21:57:17.707075331 +0000 @@ -219,9 +219,9 @@

    For fixed theta, the Crank-Nicolson scheme is the only second order scheme. Nevertheless, further stability may be achieved by choosing theta larger than ½, thereby introducing a first order error term. In order to avoid a loss of convergence order, the adaptive theta scheme can be used, where theta=½+c dt.

    Assume that we want to solve the equation u' + F(u) = 0 with a step size k. A step of the theta scheme can be written as

    -\[
+<picture><source srcset=\[
   M u_{n+1} + \theta k F(u_{n+1})  = M u_n - (1-\theta)k F(u_n).
-\] +\]" src="form_351.png"/>

    Here, M is the mass matrix. We see, that the right hand side amounts to an explicit Euler step with modified step size in weak form (up to inversion of M). The left hand side corresponds to an implicit Euler step with modified step size (right hand side given). Thus, the implementation of the theta scheme will use two Operator objects, one for the explicit, one for the implicit part. Each of these will use its own TimestepData to account for the modified step sizes (and different times if the problem is not autonomous). Note that once the explicit part has been computed, the left hand side actually constitutes a linear or nonlinear system which has to be solved.

    Usage AnyData

    @@ -301,8 +301,8 @@
    }
    size_type n() const
    size_type m() const
    -

    Now we need to study the application of the implicit and explicit operator. We assume that the pointer matrix points to the matrix created in the main program (the constructor did this for us). Here, we first get the time step size from the AnyData object that was provided as input. Then, if we are in the first step or if the timestep has changed, we fill the local matrix $m$, such that with the given matrix $M$, it becomes

    -\[ m = I - \Delta t M. \] +

    Now we need to study the application of the implicit and explicit operator. We assume that the pointer matrix points to the matrix created in the main program (the constructor did this for us). Here, we first get the time step size from the AnyData object that was provided as input. Then, if we are in the first step or if the timestep has changed, we fill the local matrix $m$, such that with the given matrix $M$, it becomes

    +\[ m = I - \Delta t M. \]

    After we have worked off the notifications, we clear them, such that the matrix is only generated when necessary.

    void Explicit::operator()(AnyData &out, const AnyData &in)
    @@ -1142,7 +1142,7 @@

    The operator computing the explicit part of the scheme. This will receive in its input data the value at the current time with name "Current time solution". It should obtain the current time and time step size from explicit_data().

    -

    Its return value is $ Mu+cF(u) $, where $u$ is the current state vector, $M$ the mass matrix, $F$ the operator in space and $c$ is the adjusted time step size $(1-\theta) \Delta t$.

    +

    Its return value is $ Mu+cF(u) $, where $u$ is the current state vector, $M$ the mass matrix, $F$ the operator in space and $c$ is the adjusted time step size $(1-\theta) \Delta t$.

    Definition at line 416 of file theta_timestepping.h.

    @@ -1170,7 +1170,7 @@

    The operator solving the implicit part of the scheme. It will receive in its input data the vector "Previous time". Information on the timestep should be obtained from implicit_data().

    -

    Its return value is the solution u of Mu-cF(u)=f, where f is the dual space vector found in the "Previous time" entry of the input data, M the mass matrix, F the operator in space and c is the adjusted time step size $ \theta \Delta t$

    +

    Its return value is the solution u of Mu-cF(u)=f, where f is the dual space vector found in the "Previous time" entry of the input data, M the mass matrix, F the operator in space and c is the adjusted time step size $ \theta \Delta t$

    Definition at line 428 of file theta_timestepping.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 2024-03-17 21:57:17.747075579 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 2024-03-17 21:57:17.751075603 +0000 @@ -154,10 +154,10 @@

    Detailed Description

    template<int dim>
    class AnisotropicPolynomials< dim >

    Anisotropic tensor product of given polynomials.

    -

    Given one-dimensional polynomials $P^x_1(x), P^x_2(x), \ldots$ in $x$-direction, $P^y_1(y), P^y_2(y), \ldots$ in $y$-direction, and so on, this class generates polynomials of the form $Q_{ijk}(x,y,z)
-= P^x_i(x)P^y_j(y)P^z_k(z)$. (With obvious generalization if dim is in fact only 2. If dim is in fact only 1, then the result is simply the same set of one-dimensional polynomials passed to the constructor.)

    -

    If the elements of each set of base polynomials are mutually orthogonal on the interval $[-1,1]$ or $[0,1]$, then the tensor product polynomials are orthogonal on $[-1,1]^d$ or $[0,1]^d$, respectively.

    -

    The resulting dim-dimensional tensor product polynomials are ordered as follows: We iterate over the $x$ coordinates running fastest, then the $y$ coordinate, etc. For example, for dim==2, the first few polynomials are thus $P^x_1(x)P^y_1(y)$, $P^x_2(x)P^y_1(y)$, $P^x_3(x)P^y_1(y)$, ..., $P^x_1(x)P^y_2(y)$, $P^x_2(x)P^y_2(y)$, $P^x_3(x)P^y_2(y)$, etc.

    +

    Given one-dimensional polynomials $P^x_1(x), P^x_2(x), \ldots$ in $x$-direction, $P^y_1(y), P^y_2(y), \ldots$ in $y$-direction, and so on, this class generates polynomials of the form $Q_{ijk}(x,y,z)
+= P^x_i(x)P^y_j(y)P^z_k(z)$. (With obvious generalization if dim is in fact only 2. If dim is in fact only 1, then the result is simply the same set of one-dimensional polynomials passed to the constructor.)

    +

    If the elements of each set of base polynomials are mutually orthogonal on the interval $[-1,1]$ or $[0,1]$, then the tensor product polynomials are orthogonal on $[-1,1]^d$ or $[0,1]^d$, respectively.

    +

    The resulting dim-dimensional tensor product polynomials are ordered as follows: We iterate over the $x$ coordinates running fastest, then the $y$ coordinate, etc. For example, for dim==2, the first few polynomials are thus $P^x_1(x)P^y_1(y)$, $P^x_2(x)P^y_1(y)$, $P^x_3(x)P^y_1(y)$, ..., $P^x_1(x)P^y_2(y)$, $P^x_2(x)P^y_2(y)$, $P^x_3(x)P^y_2(y)$, etc.

    Definition at line 322 of file tensor_product_polynomials.h.

    Constructor & Destructor Documentation

    @@ -590,7 +590,7 @@
    -

    Each tensor product polynomial $p_i$ is a product of one-dimensional polynomials in each space direction. Compute the indices of these one- dimensional polynomials for each space direction, given the index i.

    +

    Each tensor product polynomial $p_i$ is a product of one-dimensional polynomials in each space direction. Compute the indices of these one- dimensional polynomials for each space direction, given the index i.

    Definition at line 538 of file tensor_product_polynomials.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 2024-03-17 21:57:17.787075826 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 2024-03-17 21:57:17.791075851 +0000 @@ -230,14 +230,14 @@

    Detailed Description

    Interface for using ARPACK. ARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. Here we interface to the routines dnaupd and dneupd of ARPACK. If the operator is specified to be symmetric we use the symmetric interface dsaupd and dseupd of ARPACK instead. The package is designed to compute a few eigenvalues and corresponding eigenvectors of a general n by n matrix A. It is most appropriate for large sparse matrices A.

    -

    In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    +

    In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    The ArpackSolver can be used in application codes with serial objects in the following way:

    solver.solve(A, B, OP, lambda, x, size_of_spectrum);
    SolverControl & solver_control
    -

    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells ARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector that will contain the eigenvectors computed, and OP is an inverse operation for the matrix A. Shift and invert transformation around zero is applied.

    +

    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells ARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector that will contain the eigenvectors computed, and OP is an inverse operation for the matrix A. Shift and invert transformation around zero is applied.

    Through the AdditionalData the user can specify some of the parameters to be set.

    For further information on how the ARPACK routines dsaupd, dseupd, dnaupd and dneupd work and also how to set the parameters appropriately please take a look into the ARPACK manual.

    Note
    Whenever you eliminate degrees of freedom using AffineConstraints, you generate spurious eigenvalues and eigenvectors. If you make sure that the diagonals of eliminated matrix rows are all equal to one, you get a single additional eigenvalue. But beware that some functions in deal.II set these diagonals to rather arbitrary (from the point of view of eigenvalue problems) values. See also step-36 for an example.
    @@ -510,7 +510,7 @@
    -

    Solve the generalized eigensprectrum problem $A x=\lambda B x$ by calling the dsaupd and dseupd or dnaupd and dneupd functions of ARPACK.

    +

    Solve the generalized eigensprectrum problem $A x=\lambda B x$ by calling the dsaupd and dseupd or dnaupd and dneupd functions of ARPACK.

    The function returns a vector of eigenvalues of length n and a vector of eigenvectors of length n in the symmetric case and of length n+1 in the non-symmetric case. In the symmetric case all eigenvectors are real. In the non-symmetric case complex eigenvalues always occur as complex conjugate pairs. Therefore the eigenvector for an eigenvalue with nonzero complex part is stored by putting the real and the imaginary parts in consecutive real-valued vectors. The eigenvector of the complex conjugate eigenvalue does not need to be stored, since it is just the complex conjugate of the stored eigenvector. Thus, if the last n-th eigenvalue has a nonzero imaginary part, Arpack needs in total n+1 real-valued vectors to store real and imaginary parts of the eigenvectors.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html 2024-03-17 21:57:17.859076270 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html 2024-03-17 21:57:17.859076270 +0000 @@ -1025,7 +1025,7 @@
    -

    Return a reference to the $i$th element of the range represented by the current object.

    +

    Return a reference to the $i$th element of the range represented by the current object.

    This function is marked as const because it does not change the view object. It may however return a reference to a non-const memory location depending on whether the template type of the class is const or not.

    This function is only allowed to be called if the underlying data is indeed stored in CPU memory.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html 2024-03-17 21:57:17.915076617 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html 2024-03-17 21:57:17.915076617 +0000 @@ -353,27 +353,27 @@

    Names of difference formulas.

    Enumerator
    Euler 

    The symmetric Euler formula of second order:

    -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t+h) -
 u(t-h)}{2h}.
-\] +\]" src="form_359.png"/>

    UpwindEuler 

    The upwind Euler formula of first order:

    -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t) -
 u(t-h)}{h}.
-\] +\]" src="form_360.png"/>

    FourthOrder 

    The fourth order scheme

    -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t-2h) - 8u(t-h)
 +  8u(t+h) - u(t+2h)}{12h}.
-\] +\]" src="form_361.png"/>

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 2024-03-17 21:57:17.951076839 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 2024-03-17 21:57:17.955076863 +0000 @@ -153,7 +153,7 @@ (x, y) = c_0 (x_0, y_0) + c_1 (x_1, y_1) + c_2 (x_2, y_2). \]" src="form_626.png"/>

    -

    where each value $c_i$ is the relative weight of each vertex (so the centroid is, in 2d, where each $c_i = 1/3$). Since we only consider convex combinations we can rewrite this equation as

    +

    where each value $c_i$ is the relative weight of each vertex (so the centroid is, in 2d, where each $c_i = 1/3$). Since we only consider convex combinations we can rewrite this equation as

    \[
   (x, y) = (1 - c_1 - c_2) (x_0, y_0) + c_1 (x_1, y_1) + c_2 (x_2, y_2).
/usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html	2024-03-17 21:57:17.987077061 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html	2024-03-17 21:57:17.991077086 +0000
@@ -156,8 +156,8 @@
 <a name=

    Detailed Description

    template<typename VectorType>
    class BaseQR< VectorType >

    A base class for thin QR implementations.

    -

    This class and classes derived from it are meant to build $Q$ and $R$ matrices one row/column at a time, i.e., by growing $R$ matrix from an empty $0\times 0$ matrix to $N\times N$, where $N$ is the number of added column vectors.

    -

    As a consequence, matrices which have the same number of rows as each vector (i.e. $Q$ matrix) is stored as a collection of vectors of VectorType.

    +

    This class and classes derived from it are meant to build $Q$ and $R$ matrices one row/column at a time, i.e., by growing $R$ matrix from an empty $0\times 0$ matrix to $N\times N$, where $N$ is the number of added column vectors.

    +

    As a consequence, matrices which have the same number of rows as each vector (i.e. $Q$ matrix) is stored as a collection of vectors of VectorType.

    Definition at line 44 of file qr.h.

    Member Typedef Documentation

    @@ -368,7 +368,7 @@ const bool transpose = false&#href_anchor"memdoc"> -

    Solve $Rx=y$. Vectors x and y should be consistent with the current size of the subspace. If transpose is true, $R^Tx=y$ is solved instead.

    +

    Solve $Rx=y$. Vectors x and y should be consistent with the current size of the subspace. If transpose is true, $R^Tx=y$ is solved instead.

    @@ -400,7 +400,7 @@
    -

    Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    +

    Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    Implemented in QR< VectorType >, and ImplicitQR< VectorType >.

    @@ -434,7 +434,7 @@
    -

    Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    +

    Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implemented in QR< VectorType >, and ImplicitQR< VectorType >.

    @@ -468,7 +468,7 @@
    -

    Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    +

    Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    Implemented in QR< VectorType >, and ImplicitQR< VectorType >.

    @@ -502,7 +502,7 @@
    -

    Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    +

    Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implemented in QR< VectorType >, and ImplicitQR< VectorType >.

    @@ -557,7 +557,7 @@
    -

    Compute $y=Hx$ where $H$ is the matrix formed by the column vectors stored by this object.

    +

    Compute $y=Hx$ where $H$ is the matrix formed by the column vectors stored by this object.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 2024-03-17 21:57:18.031077333 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 2024-03-17 21:57:18.039077383 +0000 @@ -210,7 +210,7 @@ void swap (BlockIndices &u, BlockIndices &v) &#href_anchor"details" id="details">

    Detailed Description

    -

    BlockIndices represents a range of indices (such as the range $[0,N)$ of valid indices for elements of a vector) and how this one range is broken down into smaller but contiguous "blocks" (such as the velocity and pressure parts of a solution vector). In particular, it provides the ability to translate between global indices and the indices within a block. This class is used, for example, in the BlockVector, BlockSparsityPattern, and BlockMatrixBase classes.

    +

    BlockIndices represents a range of indices (such as the range $[0,N)$ of valid indices for elements of a vector) and how this one range is broken down into smaller but contiguous "blocks" (such as the velocity and pressure parts of a solution vector). In particular, it provides the ability to translate between global indices and the indices within a block. This class is used, for example, in the BlockVector, BlockSparsityPattern, and BlockMatrixBase classes.

    The information that can be obtained from this class falls into two groups. First, it is possible to query the global size of the index space (through the total_size() member function), and the number of blocks and their sizes (via size() and the block_size() functions).

    Secondly, this class manages the conversion of global indices to the local indices within this block, and the other way around. This is required, for example, when you address a global element in a block vector and want to know within which block this is, and which index within this block it corresponds to. It is also useful if a matrix is composed of several blocks, where you have to translate global row and column indices to local ones.

    See also
    Block (linear algebra)
    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html 2024-03-17 21:57:18.095077729 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html 2024-03-17 21:57:18.099077753 +0000 @@ -787,9 +787,9 @@
    LinearOperator< Range, Domain, BlockPayload::BlockType > distribute_constraints_linear_operator(const AffineConstraints< typename Range::value_type > &constraints, const LinearOperator< Range, Domain, BlockPayload::BlockType > &exemplar)

    and Id_c is the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

    -\[
+<picture><source srcset=\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
-\] +\]" src="form_1616.png"/>

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom module.

    @@ -830,9 +830,9 @@

    with

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

    -\[
+<picture><source srcset=\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
-\] +\]" src="form_1616.png"/>

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom module.

    @@ -1530,7 +1530,7 @@

    Return a LinearOperator that performs the operations associated with the Schur complement. There are two additional helper functions, condense_schur_rhs() and postprocess_schur_solution(), that are likely necessary to be used in order to perform any useful tasks in linear algebra with this operator.

    We construct the definition of the Schur complement in the following way:

    Consider a general system of linear equations that can be decomposed into two major sets of equations:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \mathbf{K}\mathbf{d} = \mathbf{f}
 \quad \Rightarrow\quad
 \left(\begin{array}{cc}
@@ -1543,60 +1543,60 @@
 \left(\begin{array}{cc}
    f \\ g
 \end{array}\right),
-\end{eqnarray*} +\end{eqnarray*}" src="form_1852.png"/>

    -

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

    +

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

    This is equivalent to the following two statements:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (1) \quad Ax + By &=& f \\
   (2) \quad Cx + Dy &=& g \quad .
-\end{eqnarray*} +\end{eqnarray*}" src="form_1857.png"/>

    -

    Assuming that $ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

    -\begin{eqnarray*}
+<p>Assuming that <picture><source srcset=$ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

    +\begin{eqnarray*}
   (3) \quad x &=& A^{-1}(f - By) \quad \text{from} \quad (1) \\
   (4) \quad y &=& D^{-1}(g - Cx) \quad \text{from} \quad (2) ,
-\end{eqnarray*} +\end{eqnarray*}" src="form_1859.png"/>

    which amount to performing block Gaussian elimination on this system of equations.

    For the purpose of the current implementation, we choose to substitute (3) into (2)

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   C \: A^{-1}(f - By) + Dy &=& g \\
   -C \: A^{-1} \: By + Dy &=& g - C \: A^{-1} \: f \quad .
-\end{eqnarray*} +\end{eqnarray*}" src="form_1860.png"/>

    This leads to the result

    -\[
+<picture><source srcset=\[
   (5) \quad (D - C\: A^{-1} \:B)y  = g - C \: A^{-1} f
       \quad \Rightarrow \quad Sy = g'
-\] +\]" src="form_1861.png"/>

    -

    with $ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

    -

    So for any arbitrary vector $ a $, the Schur complement performs the following operation:

    -\[
+<p> with <picture><source srcset=$ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

    +

    So for any arbitrary vector $ a $, the Schur complement performs the following operation:

    +\[
   (6) \quad Sa = (D - C \: A^{-1} \: B)a
-\] +\]" src="form_1868.png"/>

    A typical set of steps needed the solve a linear system (1),(2) would be:

    1. Define the inverse matrix A_inv (using inverse_operator()).
    2. -
    3. Define the Schur complement $ S $ (using schur_complement()).
    4. -
    5. Define iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that the both $ S $ and its preconditioner operate over the same space as $ D $.
    6. +
    7. Define the Schur complement $ S $ (using schur_complement()).
    8. +
    9. Define iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that the both $ S $ and its preconditioner operate over the same space as $ D $.
    10. Perform pre-processing step on the RHS of (5) using condense_schur_rhs():

      -\[
+<picture><source srcset=\[
      g' = g - C \: A^{-1} \: f
-   \] + \]" src="form_1870.png"/>

    11. -
    12. Solve for $ y $ in (5):

      -\[
+<li>Solve for <picture><source srcset=$ y $ in (5):

      +\[
      y =  S^{-1} g'
-   \] + \]" src="form_1872.png"/>

    13. Perform the post-processing step from (3) using postprocess_schur_solution():

      -\[
+<picture><source srcset=\[
      x =  A^{-1} (f - By)
-   \] + \]" src="form_1873.png"/>

    @@ -1642,10 +1642,10 @@
    LinearOperator< Domain, Range, BlockPayload::BlockType > inverse_operator(const LinearOperator< Range, Domain, BlockPayload::BlockType > &op, Solver &solver, const Preconditioner &preconditioner)
    PackagedOperation< Domain_1 > postprocess_schur_solution(const LinearOperator< Range_1, Domain_1, Payload > &A_inv, const LinearOperator< Range_1, Domain_2, Payload > &B, const Domain_2 &y, const Range_1 &f)
    -

    In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

    -

    A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1}
-$ is derived from the mass matrix over this space.

    -

    From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

    // Construction of approximate inverse of Schur complement
    +

    In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

    +

    A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1}
+$ is derived from the mass matrix over this space.

    +

    From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

    // Construction of approximate inverse of Schur complement
    const auto A_inv_approx = linear_operator(preconditioner_A);
    const auto S_approx = schur_complement(A_inv_approx,B,C,D);
    @@ -1668,8 +1668,8 @@
    // Solve for y
    y = S_inv * rhs;
    x = postprocess_schur_solution (A_inv,B,y,f);
    -

    Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of a IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $
-\text{prec}(D) $, should ideally be computationally inexpensive.

    +

    Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of a IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $
+\text{prec}(D) $, should ideally be computationally inexpensive.

    However, if an iterative solver based on IterationNumberControl is used as a preconditioner then the preconditioning operation is not a linear operation. Here a flexible solver like SolverFGMRES (flexible GMRES) is best employed as an outer solver in order to deal with the variable behavior of the preconditioner. Otherwise the iterative solver can stagnate somewhere near the tolerance of the preconditioner or generally behave erratically. Alternatively, using a ReductionControl would ensure that the preconditioner always solves to the same tolerance, thereby rendering its behavior constant.

    Further examples of this functionality can be found in the test-suite, such as tests/lac/schur_complement_01.cc . The solution of a multi- component problem (namely step-22) using the schur_complement can be found in tests/lac/schur_complement_03.cc .

    See also
    Block (linear algebra)
    @@ -1692,7 +1692,7 @@
    -

    Return the number of blocks in a column (i.e, the number of "block rows", or the number $m$, if interpreted as a $m\times n$ block system).

    +

    Return the number of blocks in a column (i.e, the number of "block rows", or the number $m$, if interpreted as a $m\times n$ block system).

    Definition at line 297 of file block_linear_operator.h.

    @@ -1711,7 +1711,7 @@
    -

    Return the number of blocks in a row (i.e, the number of "block columns", or the number $n$, if interpreted as a $m\times n$ block system).

    +

    Return the number of blocks in a row (i.e, the number of "block columns", or the number $n$, if interpreted as a $m\times n$ block system).

    Definition at line 303 of file block_linear_operator.h.

    @@ -1730,7 +1730,7 @@
    -

    Access the block with the given coordinates. This std::function object returns a LinearOperator representing the $(i,j)$-th block of the BlockLinearOperator.

    +

    Access the block with the given coordinates. This std::function object returns a LinearOperator representing the $(i,j)$-th block of the BlockLinearOperator.

    Definition at line 310 of file block_linear_operator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html 2024-03-17 21:57:18.159078124 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html 2024-03-17 21:57:18.159078124 +0000 @@ -1296,7 +1296,7 @@ const BlockVectorType & src&#href_anchor"memdoc"> -

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    +

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    @@ -1385,7 +1385,7 @@ const BlockVectorType & v&#href_anchor"memdoc"> -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -1744,7 +1744,7 @@
    -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -1868,7 +1868,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 2024-03-17 21:57:18.235078593 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 2024-03-17 21:57:18.235078593 +0000 @@ -941,7 +941,7 @@
    -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 396 of file block_sparse_matrix.h.

    @@ -1069,7 +1069,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 440 of file block_sparse_matrix.h.

    @@ -2061,7 +2061,7 @@
    -

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    +

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    @@ -2166,7 +2166,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -2609,7 +2609,7 @@
    -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -2717,7 +2717,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 2024-03-17 21:57:18.283078890 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 2024-03-17 21:57:18.291078940 +0000 @@ -754,7 +754,7 @@ const BlockVector< somenumber > & src&#href_anchor"memdoc"> -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 371 of file block_sparse_matrix_ez.h.

    @@ -779,7 +779,7 @@ const BlockVector< somenumber > & src&#href_anchor"memdoc"> -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 409 of file block_sparse_matrix_ez.h.

    @@ -804,7 +804,7 @@ const BlockVector< somenumber > & src&#href_anchor"memdoc"> -

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    +

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    Definition at line 391 of file block_sparse_matrix_ez.h.

    @@ -829,7 +829,7 @@ const BlockVector< somenumber > & src&#href_anchor"memdoc"> -

    Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add() but takes the transposed matrix.

    +

    Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add() but takes the transposed matrix.

    Definition at line 429 of file block_sparse_matrix_ez.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html 2024-03-17 21:57:18.359079359 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html 2024-03-17 21:57:18.367079409 +0000 @@ -1768,7 +1768,7 @@
    -

    Return the square of the $l_2$-norm.

    +

    Return the square of the $l_2$-norm.

    @@ -1820,7 +1820,7 @@
    -

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    +

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -1846,7 +1846,7 @@
    -

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    +

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -1872,7 +1872,7 @@
    -

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    +

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 2024-03-17 21:57:18.427079779 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 2024-03-17 21:57:18.431079804 +0000 @@ -1218,7 +1218,7 @@
    -

    Return the square of the $l_2$-norm.

    +

    Return the square of the $l_2$-norm.

    @@ -1258,7 +1258,7 @@
    -

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    +

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -1278,7 +1278,7 @@
    -

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    +

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -1298,7 +1298,7 @@
    -

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    +

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 2024-03-17 21:57:18.475080077 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 2024-03-17 21:57:18.475080077 +0000 @@ -166,11 +166,11 @@ &#href_anchor"details" id="details">

    Detailed Description

    template<int spacedim, typename Number = double>
    class BoundingBox< spacedim, Number >

    A class that represents a box of arbitrary dimension spacedim and with sides parallel to the coordinate axes, that is, a region

    -\[
+<picture><source srcset=\[
 [x_0^L, x_0^U] \times ... \times [x_{spacedim-1}^L, x_{spacedim-1}^U],
-\] +\]" src="form_362.png"/>

    -

    where $(x_0^L , ..., x_{spacedim-1}^L)$ and $(x_0^U , ..., x_{spacedim-1}^U)$ denote the two vertices (bottom left and top right) which are used to represent the box. The quantities $x_k^L$ and $x_k^U$ denote the "lower" and "upper" bounds of values that are within the box for each coordinate direction $k$.

    +

    where $(x_0^L , ..., x_{spacedim-1}^L)$ and $(x_0^U , ..., x_{spacedim-1}^U)$ denote the two vertices (bottom left and top right) which are used to represent the box. The quantities $x_k^L$ and $x_k^U$ denote the "lower" and "upper" bounds of values that are within the box for each coordinate direction $k$.

    Geometrically, a bounding box is thus:

    Bounding boxes are, for example, useful in parallel distributed meshes to give a general description of the owners of each portion of the mesh. More generally, bounding boxes are often used to roughly describe a region of space in which an object is contained; if a candidate point is not within the bounding box (a test that is cheap to execute), then it is not necessary to perform an expensive test whether the candidate point is in fact inside the object itself. Bounding boxes are therefore often used as a first, cheap rejection test before more detailed checks. As such, bounding boxes serve many of the same purposes as the convex hull, for which it is also relatively straightforward to compute whether a point is inside or outside, though not quite as cheap as for the bounding box.

    -

    Taking the cross section of a BoundingBox<spacedim> orthogonal to a given direction gives a box in one dimension lower: BoundingBox<spacedim - 1>. In 3d, the 2 coordinates of the cross section of BoundingBox<3> can be ordered in 2 different ways. That is, if we take the cross section orthogonal to the y direction we could either order a 3d-coordinate into a 2d-coordinate as $(x,z)$ or as $(z,x)$. This class uses the second convention, corresponding to the coordinates being ordered cyclicly $x \rightarrow y \rightarrow z \rightarrow x \rightarrow ... $ To be precise, if we take a cross section:

    +

    Taking the cross section of a BoundingBox<spacedim> orthogonal to a given direction gives a box in one dimension lower: BoundingBox<spacedim - 1>. In 3d, the 2 coordinates of the cross section of BoundingBox<3> can be ordered in 2 different ways. That is, if we take the cross section orthogonal to the y direction we could either order a 3d-coordinate into a 2d-coordinate as $(x,z)$ or as $(z,x)$. This class uses the second convention, corresponding to the coordinates being ordered cyclicly $x \rightarrow y \rightarrow z \rightarrow x \rightarrow ... $ To be precise, if we take a cross section:

    @@ -731,7 +731,7 @@
    Orthogonal to Cross section coordinates ordered as
    -

    Returns the indexth vertex of the box. Vertex is meant in the same way as for a cell, so that index $\in [0, 2^{\text{dim}} - 1]$.

    +

    Returns the indexth vertex of the box. Vertex is meant in the same way as for a cell, so that index $\in [0, 2^{\text{dim}} - 1]$.

    Definition at line 233 of file bounding_box.cc.

    @@ -799,7 +799,7 @@

    Apply the affine transformation that transforms this BoundingBox to a unit BoundingBox object.

    -

    If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $G(B) = \hat{B}$ and apply it to point.

    +

    If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $G(B) = \hat{B}$ and apply it to point.

    Definition at line 312 of file bounding_box.cc.

    @@ -822,7 +822,7 @@

    Apply the affine transformation that transforms the unit BoundingBox object to this object.

    -

    If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $F(\hat{B}) = B$ and apply it to point.

    +

    If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $F(\hat{B}) = B$ and apply it to point.

    Definition at line 327 of file bounding_box.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html 2024-03-17 21:57:18.511080298 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html 2024-03-17 21:57:18.515080323 +0000 @@ -480,7 +480,7 @@
    -

    cuSPARSE description of the lower triangular matrix $L$.

    +

    cuSPARSE description of the lower triangular matrix $L$.

    Definition at line 176 of file cuda_precondition.h.

    @@ -534,7 +534,7 @@
    -

    Solve and analysis structure for the lower triangular matrix $L$.

    +

    Solve and analysis structure for the lower triangular matrix $L$.

    Definition at line 186 of file cuda_precondition.h.

    @@ -750,7 +750,7 @@
    -

    Determine if level information should be generated for the lower triangular matrix $L$. This value can be modified through an AdditionalData object.

    +

    Determine if level information should be generated for the lower triangular matrix $L$. This value can be modified through an AdditionalData object.

    Definition at line 233 of file cuda_precondition.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html 2024-03-17 21:57:18.551080545 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html 2024-03-17 21:57:18.555080570 +0000 @@ -482,7 +482,7 @@
    -

    cuSPARSE description of the lower triangular matrix $L$.

    +

    cuSPARSE description of the lower triangular matrix $L$.

    Definition at line 388 of file cuda_precondition.h.

    @@ -563,7 +563,7 @@
    -

    Solve and analysis structure for the lower triangular matrix $L$.

    +

    Solve and analysis structure for the lower triangular matrix $L$.

    Definition at line 403 of file cuda_precondition.h.

    @@ -779,7 +779,7 @@
    -

    Determine if level information should be generated for the lower triangular matrix $L$. This value can be modified through an AdditionalData object.

    +

    Determine if level information should be generated for the lower triangular matrix $L$. This value can be modified through an AdditionalData object.

    Definition at line 450 of file cuda_precondition.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 2024-03-17 21:57:18.603080866 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 2024-03-17 21:57:18.611080916 +0000 @@ -775,7 +775,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src&#href_anchor"memdoc"> -

    Matrix-vector multiplication: let $dst = M \cdot src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M \cdot src$ with $M$ being this matrix.

    Definition at line 512 of file cuda_sparse_matrix.cc.

    @@ -798,7 +798,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src&#href_anchor"memdoc"> -

    Matrix-vector multiplication: let $dst = M^T \cdot src$ with $M$ being this matrix. This function does the same as vmult() but takes this transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T \cdot src$ with $M$ being this matrix. This function does the same as vmult() but takes this transposed matrix.

    Definition at line 530 of file cuda_sparse_matrix.cc.

    @@ -821,7 +821,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src&#href_anchor"memdoc"> -

    Adding matrix-vector multiplication. Add $M \cdot src$ on $dst$ with $M$ being this matrix.

    +

    Adding matrix-vector multiplication. Add $M \cdot src$ on $dst$ with $M$ being this matrix.

    Definition at line 548 of file cuda_sparse_matrix.cc.

    @@ -844,7 +844,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src&#href_anchor"memdoc"> -

    Adding matrix-vector multiplication. Add $M^T \cdot src$ to $dst$ with $M$ being this matrix. This function foes the same as vmult_add() but takes the transposed matrix.

    +

    Adding matrix-vector multiplication. Add $M^T \cdot src$ to $dst$ with $M$ being this matrix. This function foes the same as vmult_add() but takes the transposed matrix.

    Definition at line 566 of file cuda_sparse_matrix.cc.

    @@ -866,7 +866,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g., in the finite context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g., in the finite context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    Definition at line 584 of file cuda_sparse_matrix.cc.

    @@ -890,7 +890,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & v&#href_anchor"memdoc"> -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    Definition at line 597 of file cuda_sparse_matrix.cc.

    @@ -918,8 +918,8 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & b&#href_anchor"memdoc"> -

    Compute the residual of an equation $M \cdot x=b$, where the residual is defined to be $r=b-M \cdot x$. Write the residual into $dst$. The $l_2$ norm of the residual vector is returned.

    -

    Source $x$ and destination $dst$ must not be the same vector.

    +

    Compute the residual of an equation $M \cdot x=b$, where the residual is defined to be $r=b-M \cdot x$. Write the residual into $dst$. The $l_2$ norm of the residual vector is returned.

    +

    Source $x$ and destination $dst$ must not be the same vector.

    Definition at line 611 of file cuda_sparse_matrix.cc.

    @@ -941,8 +941,8 @@
    -

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e., $|Mv|_1\leq |M|_1 |v|_1$.

    +

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e., $|Mv|_1\leq |M|_1 |v|_1$.

    Definition at line 626 of file cuda_sparse_matrix.cc.

    @@ -964,8 +964,8 @@
    -

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural norm that is compatible to the $l_\infty$-norm of vectors, i.e., $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural norm that is compatible to the $l_\infty$-norm of vectors, i.e., $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$.

    Definition at line 645 of file cuda_sparse_matrix.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html 2024-03-17 21:57:18.727081632 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html 2024-03-17 21:57:18.731081657 +0000 @@ -4150,7 +4150,7 @@

    This function computes a fast approximate transformation from the real to the unit cell by inversion of an affine approximation of the $d$-linear function from the reference $d$-dimensional cell.

    -

    The affine approximation of the unit to real cell mapping is found by a least squares fit of an affine function to the $2^d$ vertices of the present object. For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping. Thus, this function will return a finite result for all given input points, even in cases where the actual transformation by an actual bi-/trilinear or higher order mapping might be singular. Besides only approximating the mapping from the vertex points, this function also ignores the attached manifold descriptions. The result is only exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

    +

    The affine approximation of the unit to real cell mapping is found by a least squares fit of an affine function to the $2^d$ vertices of the present object. For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping. Thus, this function will return a finite result for all given input points, even in cases where the actual transformation by an actual bi-/trilinear or higher order mapping might be singular. Besides only approximating the mapping from the vertex points, this function also ignores the attached manifold descriptions. The result is only exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

    For exact transformations to the unit cell, use Mapping::transform_real_to_unit_cell().

    Note
    If dim<spacedim we first project p onto the plane.
    @@ -4213,15 +4213,15 @@
    -

    Return the barycenter (also called centroid) of the object. The barycenter for an object $K$ of dimension $d$ in $D$ space dimensions is given by the $D$-dimensional vector $\mathbf x_K$ defined by

    -\[
+<p>Return the barycenter (also called centroid) of the object. The barycenter for an object <picture><source srcset=$K$ of dimension $d$ in $D$ space dimensions is given by the $D$-dimensional vector $\mathbf x_K$ defined by

    +\[
   \mathbf x_K = \frac{1}{|K|} \int_K \mathbf x \; \textrm{d}x
-\] +\]" src="form_1482.png"/>

    where the measure of the object is given by

    -\[
+<picture><source srcset=\[
   |K| = \int_K \mathbf 1 \; \textrm{d}x.
-\] +\]" src="form_1483.png"/>

    This function assumes that $K$ is mapped by a $d$-linear function from the reference $d$-dimensional cell. Then the integrals above can be pulled back to the reference cell and evaluated exactly (if through lengthy and, compared to the center() function, expensive computations).

    /usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 2024-03-17 21:57:18.779081954 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 2024-03-17 21:57:18.783081979 +0000 @@ -206,37 +206,37 @@

    Detailed Description

    template<int dim, int spacedim = dim, int chartdim = dim>
    class ChartManifold< dim, spacedim, chartdim >

    This class describes mappings that can be expressed in terms of charts. Specifically, this class with its template arguments describes a chart of dimension chartdim, which is part of a Manifold<dim,spacedim> and is used in an object of type Triangulation<dim,spacedim>: It specializes a Manifold of dimension chartdim embedded in a manifold of dimension spacedim, for which you have explicit pull_back() and push_forward() transformations. Its use is explained in great detail in step-53.

    -

    This is a helper class which is useful when you have an explicit map from an Euclidean space of dimension chartdim to an Euclidean space of dimension spacedim which represents your manifold, i.e., when your manifold $\mathcal{M}$ can be represented by a map

    -\[ F: \mathcal{B} \subset
-R^{\text{chartdim}} \mapsto \mathcal{M} \subset R^{\text{spacedim}} \] +

    This is a helper class which is useful when you have an explicit map from an Euclidean space of dimension chartdim to an Euclidean space of dimension spacedim which represents your manifold, i.e., when your manifold $\mathcal{M}$ can be represented by a map

    +\[ F: \mathcal{B} \subset
+R^{\text{chartdim}} \mapsto \mathcal{M} \subset R^{\text{spacedim}} \]

    (the push_forward() function) and that admits the inverse transformation

    -\[ F^{-1}: \mathcal{M} \subset R^{\text{spacedim}} \mapsto \mathcal{B}
-\subset R^{\text{chartdim}} \] +\[ F^{-1}: \mathcal{M} \subset R^{\text{spacedim}} \mapsto \mathcal{B}
+\subset R^{\text{chartdim}} \]

    (the pull_back() function).

    The get_new_point() function of the ChartManifold class is implemented by calling the pull_back() method for all surrounding_points, computing their weighted average in the chartdim Euclidean space, and calling the push_forward() method with the resulting point, i.e.,

    -\[
-\mathbf x^{\text{new}} = F(\sum_i w_i F^{-1}(\mathbf x_i)).  \] +\[
+\mathbf x^{\text{new}} = F(\sum_i w_i F^{-1}(\mathbf x_i)).  \]

    Derived classes are required to implement the push_forward() and the pull_back() methods. All other functions (with the exception of the push_forward_gradient() function, see below) that are required by mappings will then be provided by this class.

    Providing function gradients

    -

    In order to compute vectors that are tangent to the manifold (for example, tangent to a surface embedded in higher dimensional space, or simply the three unit vectors of ${\mathbb R}^3$), one needs to also have access to the gradient of the push-forward function $F$. The gradient is the matrix $(\nabla F)_{ij}=\partial_j F_i$, where we take the derivative with regard to the chartdim reference coordinates on the flat Euclidean space in which $\mathcal B$ is located. In other words, at a point $\mathbf x$, $\nabla F(\mathbf x)$ is a matrix of size spacedim times chartdim.

    +

    In order to compute vectors that are tangent to the manifold (for example, tangent to a surface embedded in higher dimensional space, or simply the three unit vectors of ${\mathbb R}^3$), one needs to also have access to the gradient of the push-forward function $F$. The gradient is the matrix $(\nabla F)_{ij}=\partial_j F_i$, where we take the derivative with regard to the chartdim reference coordinates on the flat Euclidean space in which $\mathcal B$ is located. In other words, at a point $\mathbf x$, $\nabla F(\mathbf x)$ is a matrix of size spacedim times chartdim.

    Only the ChartManifold::get_tangent_vector() function uses the gradient of the push-forward, but only a subset of all finite element codes actually require the computation of tangent vectors. Consequently, while derived classes need to implement the abstract virtual push_forward() and pull_back() functions of this class, they do not need to implement the virtual push_forward_gradient() function. Rather, that function has a default implementation (and consequently is not abstract, therefore not forcing derived classes to overload it), but the default implementation clearly can not compute anything useful and therefore simply triggers and exception.

    A note on the template arguments

    The dimension arguments chartdim, dim and spacedim must satisfy the following relationships:

    dim <= spacedim
    chartdim <= spacedim

    However, there is no a priori relationship between dim and chartdim. For example, if you want to describe a mapping for an edge (a 1d object) in a 2d triangulation embedded in 3d space, you could do so by parameterizing it via a line

    -\[
+<picture><source srcset=\[
      F: [0,1] \rightarrow {\mathbb R}^3
-  \] + \]" src="form_1426.png"/>

    in which case chartdim is 1. On the other hand, there is no reason why one can't describe this as a mapping

    -\[
+<picture><source srcset=\[
      F: {\mathbb R}^3 \rightarrow {\mathbb R}^3
-  \] + \]" src="form_1427.png"/>

    -

    in such a way that the line $[0,1]\times \{0\}\times \{0\}$ happens to be mapped onto the edge in question. Here, chartdim is 3. This may seem cumbersome but satisfies the requirements of an invertible function $F$ just fine as long as it is possible to get from the edge to the pull-back space and then back again. Finally, given that we are dealing with a 2d triangulation in 3d, one will often have a mapping from, say, the 2d unit square or unit disk to the domain in 3d space, and the edge in question may simply be the mapped edge of the unit domain in 2d space. In this case, chartdim is 2.

    +

    in such a way that the line $[0,1]\times \{0\}\times \{0\}$ happens to be mapped onto the edge in question. Here, chartdim is 3. This may seem cumbersome but satisfies the requirements of an invertible function $F$ just fine as long as it is possible to get from the edge to the pull-back space and then back again. Finally, given that we are dealing with a 2d triangulation in 3d, one will often have a mapping from, say, the 2d unit square or unit disk to the domain in 3d space, and the edge in question may simply be the mapped edge of the unit domain in 2d space. In this case, chartdim is 2.

    Definition at line 902 of file manifold.h.

    Member Typedef Documentation

    @@ -566,7 +566,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -600,24 +600,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1440.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1441.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -626,11 +626,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1442.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 2024-03-17 21:57:18.843082349 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 2024-03-17 21:57:18.847082374 +0000 @@ -1036,7 +1036,7 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    +

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by ChunkSparsityPattern::symmetrize().

    @@ -1367,7 +1367,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    @@ -1392,7 +1392,7 @@ const Vector< somenumber > & v&#href_anchor"memdoc"> -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -1441,8 +1441,8 @@
    -

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann : Numerische Mathematik)

    +

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann : Numerische Mathematik)

    @@ -1462,8 +1462,8 @@
    -

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann : Numerische Mathematik)

    +

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann : Numerische Mathematik)

    @@ -2157,7 +2157,7 @@
    -

    Return the location of entry $(i,j)$ within the val array.

    +

    Return the location of entry $(i,j)$ within the val array.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 2024-03-17 21:57:18.907082745 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 2024-03-17 21:57:18.907082745 +0000 @@ -1123,7 +1123,7 @@
    -

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

    +

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

    Definition at line 520 of file chunk_sparsity_pattern.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 2024-03-17 21:57:18.959083066 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 2024-03-17 21:57:18.963083090 +0000 @@ -594,24 +594,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1440.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1441.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -620,11 +620,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1442.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classConvergenceTable.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classConvergenceTable.html 2024-03-17 21:57:19.003083338 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classConvergenceTable.html 2024-03-17 21:57:19.007083362 +0000 @@ -362,14 +362,14 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    const unsigned int dim = 2&#href_anchor"memdoc">

    Evaluate the convergence rates of the data column data_column_key due to the RateMode in relation to the reference column reference_column_key. Be sure that the value types of the table entries of the data column and the reference data column is a number, i.e. double, float, (unsigned) int, and so on.

    -

    As this class has no information on the space dimension upon which the reference column vs. the value column is based upon, it needs to be passed as last argument to this method. The default dimension for the reference column is 2, which is appropriate for the number of cells in 2d. If you work in 3d, set the number to 3. If the reference column is $1/h$, remember to set the dimension to 1 also when working in 3d to get correct rates.

    +

    As this class has no information on the space dimension upon which the reference column vs. the value column is based upon, it needs to be passed as last argument to this method. The default dimension for the reference column is 2, which is appropriate for the number of cells in 2d. If you work in 3d, set the number to 3. If the reference column is $1/h$, remember to set the dimension to 1 also when working in 3d to get correct rates.

    The new rate column and the data column will be merged to a supercolumn. The tex caption of the supercolumn will be (by default) the same as the one of the data column. This may be changed by using the set_tex_supercaption (...) function of the base class TableHandler.

    This method behaves in the following way:

    -

    If RateMode is reduction_rate, then the computed output is $
-\frac{e_{n-1}/k_{n-1}}{e_n/k_n}, $ where $k$ is the reference column (no dimension dependence!).

    -

    If RateMode is reduction_rate_log2, then the computed output is $ dim
-\frac{\log |e_{n-1}/e_{n}|}{\log |k_n/k_{n-1}|} $.

    -

    This is useful, for example, if we use as reference key the number of degrees of freedom or better, the number of cells. Assuming that the error is proportional to $ C (1/\sqrt{k})^r $ in 2d, then this method will produce the rate $r$ as a result. For general dimension, as described by the last parameter of this function, the formula needs to be $ C (1/\sqrt[dim]{k})^r $.

    +

    If RateMode is reduction_rate, then the computed output is $
+\frac{e_{n-1}/k_{n-1}}{e_n/k_n}, $ where $k$ is the reference column (no dimension dependence!).

    +

    If RateMode is reduction_rate_log2, then the computed output is $ dim
+\frac{\log |e_{n-1}/e_{n}|}{\log |k_n/k_{n-1}|} $.

    +

    This is useful, for example, if we use as reference key the number of degrees of freedom or better, the number of cells. Assuming that the error is proportional to $ C (1/\sqrt{k})^r $ in 2d, then this method will produce the rate $r$ as a result. For general dimension, as described by the last parameter of this function, the formula needs to be $ C (1/\sqrt[dim]{k})^r $.

    Note
    Since this function adds columns to the table after several rows have already been filled, it switches off the auto fill mode of the TableHandler base class. If you intend to add further data with auto fill, you will have to re-enable it after calling this function.

    Definition at line 23 of file convergence_table.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html 2024-03-17 21:57:19.055083659 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html 2024-03-17 21:57:19.059083684 +0000 @@ -413,7 +413,7 @@
    -

    Compute the cylindrical coordinates $(r, \phi, \lambda)$ for the given space point where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    +

    Compute the cylindrical coordinates $(r, \phi, \lambda)$ for the given space point where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Implements ChartManifold< dim, spacedim, chartdim >.

    @@ -445,7 +445,7 @@
    -

    Compute the Cartesian coordinates for a chart point given in cylindrical coordinates $(r, \phi, \lambda)$, where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    +

    Compute the Cartesian coordinates for a chart point given in cylindrical coordinates $(r, \phi, \lambda)$, where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Definition at line 1144 of file manifold_lib.cc.

    @@ -475,7 +475,7 @@
    -

    Compute the derivatives of the mapping from cylindrical coordinates $(r, \phi, \lambda)$ to cartesian coordinates where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    +

    Compute the derivatives of the mapping from cylindrical coordinates $(r, \phi, \lambda)$ to cartesian coordinates where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Definition at line 1164 of file manifold_lib.cc.

    @@ -644,7 +644,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -678,24 +678,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1440.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1441.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -704,11 +704,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1442.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 2024-03-17 21:57:19.099083930 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 2024-03-17 21:57:19.103083955 +0000 @@ -183,7 +183,7 @@

    As a consequence, DataOut is forced to take things apart into their real and imaginary parts, and both are output as separate quantities. This is the case for data that is written directly to a file by DataOut, but it is also the case for data that is first routed through DataPostprocessor objects (or objects of their derived classes): All these objects see is a collection of real values, even if the underlying solution vector was complex-valued.

    All of this has two implications:

    • If a solution vector is complex-valued, then this results in at least two input components at each evaluation point. As a consequence, the DataPostprocessor::evaluate_scalar_field() function is never called, even if the underlying finite element had only a single solution component. Instead, DataOut will always call DataPostprocessor::evaluate_vector_field().
    • -
    • Implementations of the DataPostprocessor::evaluate_vector_field() in derived classes must understand how the solution values are arranged in the DataPostprocessorInputs::Vector objects they receive as input. The rule here is: If the finite element has $N$ vector components (including the case $N=1$, i.e., a scalar element), then the inputs for complex-valued solution vectors will have $2N$ components. These first contain the values (or gradients, or Hessians) of the real parts of all solution components, and then the values (or gradients, or Hessians) of the imaginary parts of all solution components.
    • +
    • Implementations of the DataPostprocessor::evaluate_vector_field() in derived classes must understand how the solution values are arranged in the DataPostprocessorInputs::Vector objects they receive as input. The rule here is: If the finite element has $N$ vector components (including the case $N=1$, i.e., a scalar element), then the inputs for complex-valued solution vectors will have $2N$ components. These first contain the values (or gradients, or Hessians) of the real parts of all solution components, and then the values (or gradients, or Hessians) of the imaginary parts of all solution components.

    step-58 provides an example of how this class (or, rather, the derived DataPostprocessorScalar class) is used in a complex-valued situation.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 2024-03-17 21:57:19.139084177 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 2024-03-17 21:57:19.143084202 +0000 @@ -255,7 +255,7 @@

    These pictures show an ellipse representing the gradient tensor at, on average, every tenth mesh point. You may want to read through the documentation of the VisIt visualization program (see https://wci.llnl.gov/simulation/computer-codes/visit/) for an interpretation of how exactly tensors are visualizated.

    -

    In elasticity, one is often interested not in the gradient of the displacement, but in the "strain", i.e., the symmetrized version of the gradient $\varepsilon=\frac 12 (\nabla u + \nabla u^T)$. This is easily facilitated with the following minor modification:

    template <int dim>
    +

    In elasticity, one is often interested not in the gradient of the displacement, but in the "strain", i.e., the symmetrized version of the gradient $\varepsilon=\frac 12 (\nabla u + \nabla u^T)$. This is easily facilitated with the following minor modification:

    template <int dim>
    class StrainPostprocessor : public DataPostprocessorTensor<dim>
    {
    public:
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 2024-03-17 21:57:19.179084425 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 2024-03-17 21:57:19.187084474 +0000 @@ -246,7 +246,7 @@

    In the second image, the background color corresponds to the magnitude of the gradient vector and the vector glyphs to the gradient itself. It may be surprising at first to see that from each vertex, multiple vectors originate, going in different directions. But that is because the solution is only continuous: in general, the gradient is discontinuous across edges, and so the multiple vectors originating from each vertex simply represent the differing gradients of the solution at each adjacent cell.

    -

    The output above – namely, the gradient $\nabla u$ of the solution – corresponds to the temperature gradient if one interpreted step-6 as solving a steady-state heat transfer problem. It is very small in the central part of the domain because in step-6 we are solving an equation that has a coefficient $a(\mathbf x)$ that is large in the central part and small on the outside. This can be thought as a material that conducts heat well, and consequently the temperature gradient is small. On the other hand, the "heat flux" corresponds to the quantity $a(\mathbf x) \nabla u(\mathbf x)$. For the solution of that equation, the flux should be continuous across the interface. This is easily verified by the following modification of the postprocessor:

    template <int dim>
    +

    The output above – namely, the gradient $\nabla u$ of the solution – corresponds to the temperature gradient if one interpreted step-6 as solving a steady-state heat transfer problem. It is very small in the central part of the domain because in step-6 we are solving an equation that has a coefficient $a(\mathbf x)$ that is large in the central part and small on the outside. This can be thought as a material that conducts heat well, and consequently the temperature gradient is small. On the other hand, the "heat flux" corresponds to the quantity $a(\mathbf x) \nabla u(\mathbf x)$. For the solution of that equation, the flux should be continuous across the interface. This is easily verified by the following modification of the postprocessor:

    template <int dim>
    class HeatFluxPostprocessor : public DataPostprocessorVector<dim>
    {
    public:
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 2024-03-17 21:57:19.215084647 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 2024-03-17 21:57:19.219084672 +0000 @@ -235,7 +235,7 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    +

    Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    Definition at line 492 of file derivative_approximation.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 2024-03-17 21:57:19.239084795 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 2024-03-17 21:57:19.243084820 +0000 @@ -230,7 +230,7 @@
    -

    Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    +

    Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    Definition at line 631 of file derivative_approximation.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html 2024-03-17 21:57:19.279085043 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html 2024-03-17 21:57:19.287085092 +0000 @@ -164,24 +164,24 @@ DerivativeForm< 1, spacedim, dim, Number >&#href_anchor"memTemplItemRight" valign="bottom">transpose (const DerivativeForm< 1, dim, spacedim, Number > &DF) &#href_anchor"details" id="details">

    Detailed Description

    template<int order, int dim, int spacedim, typename Number = double>
    -class DerivativeForm< order, dim, spacedim, Number >

    This class represents the (tangential) derivatives of a function $ \mathbf F:
-{\mathbb R}^{\text{dim}} \rightarrow {\mathbb R}^{\text{spacedim}}$. Such functions are always used to map the reference dim-dimensional cell into spacedim-dimensional space. For such objects, the first derivative of the function is a linear map from ${\mathbb R}^{\text{dim}}$ to ${\mathbb
-R}^{\text{spacedim}}$, i.e., it can be represented as a matrix in ${\mathbb
-R}^{\text{spacedim}\times \text{dim}}$. This makes sense since one would represent the first derivative, $\nabla \mathbf F(\mathbf x)$ with $\mathbf
+class DerivativeForm< order, dim, spacedim, Number ></div><p>This class represents the (tangential) derivatives of a function <picture><source srcset=$ \mathbf F:
+{\mathbb R}^{\text{dim}} \rightarrow {\mathbb R}^{\text{spacedim}}$. Such functions are always used to map the reference dim-dimensional cell into spacedim-dimensional space. For such objects, the first derivative of the function is a linear map from ${\mathbb R}^{\text{dim}}$ to ${\mathbb
+R}^{\text{spacedim}}$, i.e., it can be represented as a matrix in ${\mathbb
+R}^{\text{spacedim}\times \text{dim}}$. This makes sense since one would represent the first derivative, $\nabla \mathbf F(\mathbf x)$ with $\mathbf
 x\in
-{\mathbb R}^{\text{dim}}$, in such a way that the directional derivative in direction $\mathbf d\in {\mathbb R}^{\text{dim}}$ so that

    -\begin{align*}
+{\mathbb R}^{\text{dim}}$, in such a way that the directional derivative in direction $\mathbf d\in {\mathbb R}^{\text{dim}}$ so that

    +\begin{align*}
   \nabla \mathbf F(\mathbf x) \mathbf d
   = \lim_{\varepsilon\rightarrow 0}
     \frac{\mathbf F(\mathbf x + \varepsilon \mathbf d) - \mathbf F(\mathbf
 x)}{\varepsilon},
-\end{align*} +\end{align*}" src="form_387.png"/>

    -

    i.e., one needs to be able to multiply the matrix $\nabla \mathbf F(\mathbf
-x)$ by a vector in ${\mathbb R}^{\text{dim}}$, and the result is a difference of function values, which are in ${\mathbb R}^{\text{spacedim}}$. Consequently, the matrix must be of size $\text{spacedim}\times\text{dim}$.

    -

    Similarly, the second derivative is a bilinear map from ${\mathbb
-R}^{\text{dim}} \times  {\mathbb R}^{\text{dim}}$ to ${\mathbb
-R}^{\text{spacedim}}$, which one can think of a rank-3 object of size $\text{spacedim}\times\text{dim}\times\text{dim}$.

    +

    i.e., one needs to be able to multiply the matrix $\nabla \mathbf F(\mathbf
+x)$ by a vector in ${\mathbb R}^{\text{dim}}$, and the result is a difference of function values, which are in ${\mathbb R}^{\text{spacedim}}$. Consequently, the matrix must be of size $\text{spacedim}\times\text{dim}$.

    +

    Similarly, the second derivative is a bilinear map from ${\mathbb
+R}^{\text{dim}} \times  {\mathbb R}^{\text{dim}}$ to ${\mathbb
+R}^{\text{spacedim}}$, which one can think of a rank-3 object of size $\text{spacedim}\times\text{dim}\times\text{dim}$.

    In deal.II we represent these derivatives using objects of type DerivativeForm<1,dim,spacedim,Number>, DerivativeForm<2,dim,spacedim,Number> and so on.

    Definition at line 58 of file derivative_form.h.

    @@ -393,7 +393,7 @@
    -

    Converts a DerivativeForm <order, dim, dim, Number> to Tensor<order+1, dim, Number>. In particular, if order == 1 and the derivative is the Jacobian of $\mathbf F(\mathbf x)$, then Tensor[i] = $\nabla F_i(\mathbf x)$.

    +

    Converts a DerivativeForm <order, dim, dim, Number> to Tensor<order+1, dim, Number>. In particular, if order == 1 and the derivative is the Jacobian of $\mathbf F(\mathbf x)$, then Tensor[i] = $\nabla F_i(\mathbf x)$.

    @@ -453,8 +453,8 @@
    -

    Compute the Frobenius norm of this form, i.e., the expression $\sqrt{\sum_{ij} |DF_{ij}|^2} =
-\sqrt{\sum_{ij} |\frac{\partial F_i}{\partial x_j}|^2}$.

    +

    Compute the Frobenius norm of this form, i.e., the expression $\sqrt{\sum_{ij} |DF_{ij}|^2} =
+\sqrt{\sum_{ij} |\frac{\partial F_i}{\partial x_j}|^2}$.

    @@ -474,7 +474,7 @@
    -

    Compute the volume element associated with the jacobian of the transformation $\mathbf F$. That is to say if $DF$ is square, it computes $\det(DF)$, in case DF is not square returns $\sqrt{\det(DF^T \,DF)}$.

    +

    Compute the volume element associated with the jacobian of the transformation $\mathbf F$. That is to say if $DF$ is square, it computes $\det(DF)$, in case DF is not square returns $\sqrt{\det(DF^T \,DF)}$.

    @@ -494,9 +494,9 @@
    -

    Assuming that the current object stores the Jacobian of a mapping $\mathbf F$, then the current function computes the covariant form of the derivative, namely $(\nabla \mathbf F) {\mathbf G}^{-1}$, where $\mathbf G = (\nabla \mathbf F)^{T}(\nabla \mathbf F)$. If $\nabla \mathbf
-F$ is a square matrix (i.e., $\mathbf F:
-{\mathbb R}^n \mapsto {\mathbb R}^n$), then this function simplifies to computing $\nabla {\mathbf F}^{-T}$.

    +

    Assuming that the current object stores the Jacobian of a mapping $\mathbf F$, then the current function computes the covariant form of the derivative, namely $(\nabla \mathbf F) {\mathbf G}^{-1}$, where $\mathbf G = (\nabla \mathbf F)^{T}(\nabla \mathbf F)$. If $\nabla \mathbf
+F$ is a square matrix (i.e., $\mathbf F:
+{\mathbb R}^n \mapsto {\mathbb R}^n$), then this function simplifies to computing $\nabla {\mathbf F}^{-T}$.

    @@ -552,7 +552,7 @@
    -

    Auxiliary function that computes $A T^{T}$ where A represents the current object.

    +

    Auxiliary function that computes $A T^{T}$ where A represents the current object.

    @@ -581,21 +581,21 @@
    -

    One of the uses of DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$

    -\[
+<p>One of the uses of <a class=DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$

    +\[
   \nabla \mathbf F(\mathbf x) \; \Delta \mathbf x
   \approx
   \mathbf F(\mathbf x + \Delta \mathbf x) - \mathbf F(\mathbf x).
-\] +\]" src="form_396.png"/>

    The transformation corresponds to

    -\[
+<picture><source srcset=\[
   [\text{result}]_{i_1,\dots,i_k} = i\sum_{j}
   \left[\nabla \mathbf F(\mathbf x)\right]_{i_1,\dots,i_k, j}
   \Delta x_j
-\] +\]" src="form_397.png"/>

    -

    in index notation and corresponds to $[\Delta \mathbf x] [\nabla \mathbf F(\mathbf x)]^T$ in matrix notation.

    +

    in index notation and corresponds to $[\Delta \mathbf x] [\nabla \mathbf F(\mathbf x)]^T$ in matrix notation.

    Definition at line 454 of file derivative_form.h.

    @@ -625,7 +625,7 @@
    -

    Similar to the previous apply_transformation(). Each row of the result corresponds to one of the rows of D_X transformed by grad_F, equivalent to $\mathrm{D\_X} \, \mathrm{grad\_F}^T$ in matrix notation.

    +

    Similar to the previous apply_transformation(). Each row of the result corresponds to one of the rows of D_X transformed by grad_F, equivalent to $\mathrm{D\_X} \, \mathrm{grad\_F}^T$ in matrix notation.

    Definition at line 479 of file derivative_form.h.

    @@ -655,7 +655,7 @@
    -

    Similar to the previous apply_transformation(), specialized for the case dim == spacedim where we can return a rank-2 tensor instead of the more general DerivativeForm. Each row of the result corresponds to one of the rows of D_X transformed by grad_F, equivalent to $\mathrm{D\_X} \, \mathrm{grad\_F}^T$ in matrix notation.

    +

    Similar to the previous apply_transformation(), specialized for the case dim == spacedim where we can return a rank-2 tensor instead of the more general DerivativeForm. Each row of the result corresponds to one of the rows of D_X transformed by grad_F, equivalent to $\mathrm{D\_X} \, \mathrm{grad\_F}^T$ in matrix notation.

    Definition at line 505 of file derivative_form.h.

    @@ -715,11 +715,11 @@
    -

    Similar to the previous apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

    -\[
+<p>Similar to the previous <a class=apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

    +\[
   \mathbf u \cdot \mathbf A \mathbf v =
   \text{DF2}^{-1}(\mathbf u) \cdot \text{DF1}^{-1}(\mathbf v)
-\] +\]" src="form_404.png"/>

    Definition at line 565 of file derivative_form.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html 2024-03-17 21:57:19.339085413 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html 2024-03-17 21:57:19.339085413 +0000 @@ -514,7 +514,7 @@
    -

    Compute the value of the residual vector field $\mathbf{r}(\mathbf{X})$.

    +

    Compute the value of the residual vector field $\mathbf{r}(\mathbf{X})$.

    Parameters
    @@ -552,9 +552,9 @@
    [out]residualA Vector object with the value for each component of the vector field evaluated at the point defined by the independent variable values.

    Compute the gradient (first derivative) of the residual vector field with respect to all independent variables, i.e.

    -\[
+<picture><source srcset=\[
   \frac{\partial\mathbf{r}(\mathbf{X})}{\partial\mathbf{X}}
-\] +\]" src="form_904.png"/>

    Parameters
    @@ -1295,7 +1295,7 @@
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1336,7 +1336,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    +

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not then, for example, ADOL-C won't throw an error, but rather it might complain nonsensically during later computations or produce garbage results.
    @@ -1411,7 +1411,7 @@
    -

    Initialize an independent variable $X_{i}$.

    +

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1542,7 +1542,7 @@
    [out]outAn auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.
    -

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    +

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 2024-03-17 21:57:19.403085809 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 2024-03-17 21:57:19.411085858 +0000 @@ -440,11 +440,11 @@

    The constructor for the class.

    Parameters
    [in]indexThe index of the entry in the global list of dependent variables that this function belongs to.
    - +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\Psi(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\Psi(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    -
    Note
    There is only one dependent variable associated with the total energy attributed to the local finite element. That is to say, this class assumes that the (local) right hand side and matrix contribution is computed from the first and second derivatives of a scalar function $\Psi(\mathbf{X})$.
    +
    Note
    There is only one dependent variable associated with the total energy attributed to the local finite element. That is to say, this class assumes that the (local) right hand side and matrix contribution is computed from the first and second derivatives of a scalar function $\Psi(\mathbf{X})$.

    Definition at line 793 of file ad_helpers.cc.

    @@ -495,7 +495,7 @@
    -

    Register the definition of the total cell energy $\Psi(\mathbf{X})$.

    +

    Register the definition of the total cell energy $\Psi(\mathbf{X})$.

    Parameters
    @@ -527,9 +527,9 @@
    [in]energyA recorded function that defines the total cell energy. This represents the single dependent variable from which both the residual and its linearization are to be computed.

    Evaluation of the total scalar energy functional for a chosen set of degree of freedom values, i.e.

    -\[
+<picture><source srcset=\[
   \Psi(\mathbf{X}) \vert_{\mathbf{X}}
-\] +\]" src="form_906.png"/>

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Returns
    The value of the energy functional at the evaluation point corresponding to a chosen set of local degree of freedom values.
    @@ -562,12 +562,12 @@
    -

    Evaluation of the residual for a chosen set of degree of freedom values. Underlying this is the computation of the gradient (first derivative) of the scalar function $\Psi$ with respect to all independent variables, i.e.

    -\[
+<p>Evaluation of the residual for a chosen set of degree of freedom values. Underlying this is the computation of the gradient (first derivative) of the scalar function <picture><source srcset=$\Psi$ with respect to all independent variables, i.e.

    +\[
   \mathbf{r}(\mathbf{X}) =
 \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{X}}
 \Big\vert_{\mathbf{X}}
-\] +\]" src="form_907.png"/>

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Parameters
    @@ -607,13 +607,13 @@
    -

    Compute the linearization of the residual vector around a chosen set of degree of freedom values. Underlying this is the computation of the Hessian (second derivative) of the scalar function $\Psi$ with respect to all independent variables, i.e.

    -\[
+<p>Compute the linearization of the residual vector around a chosen set of degree of freedom values. Underlying this is the computation of the Hessian (second derivative) of the scalar function <picture><source srcset=$\Psi$ with respect to all independent variables, i.e.

    +\[
   \frac{\partial\mathbf{r}(\mathbf{X})}{\partial\mathbf{X}}
     =
 \frac{\partial^{2}\Psi(\mathbf{X})}{\partial\mathbf{X}
 \otimes \partial\mathbf{X}} \Big\vert_{\mathbf{X}}
-\] +\]" src="form_908.png"/>

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Parameters
    @@ -1551,7 +1551,7 @@
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1592,7 +1592,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    +

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not then, for example, ADOL-C won't throw an error, but rather it might complain nonsensically during later computations or produce garbage results.
    @@ -1667,7 +1667,7 @@
    -

    Initialize an independent variable $X_{i}$.

    +

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1798,7 +1798,7 @@
    [out]outAn auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.
    -

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    +

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html 2024-03-17 21:57:19.463086179 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html 2024-03-17 21:57:19.463086179 +0000 @@ -991,7 +991,7 @@
    [in]indexThe index of the entry in the global list of dependent variables that this function belongs to.
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1032,7 +1032,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    +

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not then, for example, ADOL-C won't throw an error, but rather it might complain nonsensically during later computations or produce garbage results.
    @@ -1107,7 +1107,7 @@
    -

    Initialize an independent variable $X_{i}$.

    +

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1238,7 +1238,7 @@
    [out]outAn auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.
    -

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    +

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html 2024-03-17 21:57:19.523086550 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html 2024-03-17 21:57:19.527086575 +0000 @@ -437,7 +437,7 @@
    [in]indexThe index of the entry in the global list of dependent variables that this function belongs to.
    const ExtractorType & extractor&#href_anchor"memdoc"> -

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    +

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -551,7 +551,7 @@
    [in]valueA field that defines a number of independent variables. When considering taped AD numbers with branching functions, to avoid potential issues with branch switching it may be a good idea to choose these values close or equal to those that will be later evaluated and differentiated around.
    const ExtractorType & extractor&#href_anchor"memdoc"> -

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    +

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -600,7 +600,7 @@
    [in]valueA field that defines the values of a number of independent variables.
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1353,7 +1353,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1394,7 +1394,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    +

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not then, for example, ADOL-C won't throw an error, but rather it might complain nonsensically during later computations or produce garbage results.
    @@ -1469,7 +1469,7 @@
    -

    Initialize an independent variable $X_{i}$.

    +

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1600,7 +1600,7 @@
    [out]outAn auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.
    -

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    +

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html 2024-03-17 21:57:19.591086970 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html 2024-03-17 21:57:19.599087019 +0000 @@ -454,8 +454,8 @@

    The constructor for the class.

    Parameters
    [in]indexThe index of the entry in the global list of dependent variables that this function belongs to.
    - - + +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{r}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{r}(\mathbf{X})$, this will be the number of outputs $\mathbf{r}$, i.e., the dimension of the image space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{r}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{r}(\mathbf{X})$, this will be the number of outputs $\mathbf{r}$, i.e., the dimension of the image space.
    @@ -509,7 +509,7 @@
    -

    Register the definition of the cell residual vector $\mathbf{r}(\mathbf{X})$.

    +

    Register the definition of the cell residual vector $\mathbf{r}(\mathbf{X})$.

    Parameters
    @@ -549,9 +549,9 @@
    [in]residualA vector of recorded functions that defines the residual. The components of this vector represents the dependent variables.

    Evaluation of the residual for a chosen set of degree of freedom values. This corresponds to the computation of the residual vector, i.e.

    -\[
+<picture><source srcset=\[
   \mathbf{r}(\mathbf{X}) \vert_{\mathbf{X}}
-\] +\]" src="form_910.png"/>

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Parameters
    @@ -591,10 +591,10 @@
    -

    Compute the linearization of the residual vector around a chosen set of degree of freedom values. Underlying this is the computation of the gradient (first derivative) of the residual vector $\mathbf{r}$ with respect to all independent variables, i.e.

    -\[
+<p>Compute the linearization of the residual vector around a chosen set of degree of freedom values. Underlying this is the computation of the gradient (first derivative) of the residual vector <picture><source srcset=$\mathbf{r}$ with respect to all independent variables, i.e.

    +\[
   \frac{\partial\mathbf{r}(\mathbf{X})}{\partial\mathbf{X}}
-\] +\]" src="form_904.png"/>

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Parameters
    @@ -1532,7 +1532,7 @@
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1573,7 +1573,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    +

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not then, for example, ADOL-C won't throw an error, but rather it might complain nonsensically during later computations or produce garbage results.
    @@ -1648,7 +1648,7 @@
    -

    Initialize an independent variable $X_{i}$.

    +

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1779,7 +1779,7 @@
    [out]outAn auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.
    -

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    +

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html 2024-03-17 21:57:19.667087439 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html 2024-03-17 21:57:19.667087439 +0000 @@ -520,7 +520,7 @@
    [in]indexThe index of the entry in the global list of dependent variables that this function belongs to.
    -

    Register the definition of the scalar field $\Psi(\mathbf{X})$.

    +

    Register the definition of the scalar field $\Psi(\mathbf{X})$.

    Parameters
    @@ -551,7 +551,7 @@
    [in]funcThe recorded function that defines a dependent variable.
    -

    Compute the value of the scalar field $\Psi(\mathbf{X})$ using the tape as opposed to executing the source code.

    +

    Compute the value of the scalar field $\Psi(\mathbf{X})$ using the tape as opposed to executing the source code.

    Returns
    A scalar object with the value for the scalar field evaluated at the point defined by the independent variable values.

    Definition at line 1348 of file ad_helpers.cc.

    @@ -575,9 +575,9 @@

    Compute the gradient (first derivative) of the scalar field with respect to all independent variables, i.e.

    -\[
+<picture><source srcset=\[
   \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{X}}
-\] +\]" src="form_912.png"/>

    Parameters
    @@ -607,10 +607,10 @@

    Compute the Hessian (second derivative) of the scalar field with respect to all independent variables, i.e.

    -\[
+<picture><source srcset=\[
   \frac{\partial^{2}\Psi(\mathbf{X})}{\partial\mathbf{X} \otimes
 \partial\mathbf{X}}
-\] +\]" src="form_913.png"/>

    Parameters
    @@ -653,10 +653,10 @@
    -

    Extract the function gradient for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    -\[
+<p>Extract the function gradient for a subset of independent variables <picture><source srcset=$\mathbf{A} \subset \mathbf{X}$, i.e.

    +\[
   \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}}
-\] +\]" src="form_914.png"/>

    Parameters
    @@ -704,13 +704,13 @@
    -

    Extract the function Hessian for a subset of independent variables $\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    -\[
+<p>Extract the function Hessian for a subset of independent variables <picture><source srcset=$\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    +\[
   \frac{}{\partial\mathbf{B}} \left[
 \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}} \right] =
 \frac{\partial^{2}\Psi(\mathbf{X})}{\partial\mathbf{B} \otimes
 \partial\mathbf{A}}
-\] +\]" src="form_916.png"/>

    Parameters
    @@ -753,11 +753,11 @@
    -

    Extract the function Hessian for a subset of independent variables $\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    -\[
+<p>Extract the function Hessian for a subset of independent variables <picture><source srcset=$\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    +\[
   \frac{}{\partial\mathbf{B}} \left[
 \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}} \right]
-\] +\]" src="form_917.png"/>

    This function is a specialization of the above for rank-0 tensors (scalars). This corresponds to extracting a single entry of the Hessian matrix because both extractors imply selection of just a single row or column of the matrix.

    @@ -794,11 +794,11 @@
    -

    Extract the function Hessian for a subset of independent variables $\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    -\[
+<p>Extract the function Hessian for a subset of independent variables <picture><source srcset=$\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    +\[
   \frac{}{\partial\mathbf{B}} \left[
 \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}} \right]
-\] +\]" src="form_917.png"/>

    This function is a specialization of the above for rank-4 symmetric tensors.

    @@ -919,7 +919,7 @@
    -

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    +

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -1064,7 +1064,7 @@
    [in]valueA field that defines a number of independent variables. When considering taped AD numbers with branching functions, to avoid potential issues with branch switching it may be a good idea to choose these values close or equal to those that will be later evaluated and differentiated around.
    -

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    +

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -1113,7 +1113,7 @@
    [in]valueA field that defines the values of a number of independent variables.
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1155,7 +1155,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1907,7 +1907,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    +

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not then, for example, ADOL-C won't throw an error, but rather it might complain nonsensically during later computations or produce garbage results.
    @@ -1982,7 +1982,7 @@
    -

    Initialize an independent variable $X_{i}$.

    +

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -2113,7 +2113,7 @@
    [out]outAn auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.
    -

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    +

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html 2024-03-17 21:57:19.735087859 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html 2024-03-17 21:57:19.743087909 +0000 @@ -524,7 +524,7 @@
    [in]indexThe index of the entry in the global list of dependent variables that this function belongs to.
    -

    Register the definition of the vector field $\boldsymbol{\Psi}(\mathbf{X})$.

    +

    Register the definition of the vector field $\boldsymbol{\Psi}(\mathbf{X})$.

    Parameters
    @@ -558,7 +558,7 @@
    [in]funcsA vector of recorded functions that defines the dependent variables.
    const ExtractorType & extractor&#href_anchor"memdoc"> -

    Register the definition of the vector field $\hat{\mathbf{g}}(\mathbf{X}) \subset \boldsymbol{\Psi}(\mathbf{X})$ that may represent a subset of the dependent variables.

    +

    Register the definition of the vector field $\hat{\mathbf{g}}(\mathbf{X}) \subset \boldsymbol{\Psi}(\mathbf{X})$ that may represent a subset of the dependent variables.

    Parameters
    @@ -588,7 +588,7 @@
    [in]funcsThe recorded functions that define a set of dependent variables.
    -

    Compute the value of the vector field $\boldsymbol{\Psi}(\mathbf{X})$.

    +

    Compute the value of the vector field $\boldsymbol{\Psi}(\mathbf{X})$.

    Parameters
    @@ -617,10 +617,10 @@
    [out]valuesA Vector object with the value for each component of the vector field evaluated at the point defined by the independent variable values. The output values vector has a length corresponding to n_dependent_variables.

    Compute the Jacobian (first derivative) of the vector field with respect to all independent variables, i.e.

    -\[
+<picture><source srcset=\[
   \mathbf{J}(\boldsymbol{\Psi})
      = \frac{\partial\boldsymbol{\Psi}(\mathbf{X})}{\partial\mathbf{X}}
-\] +\]" src="form_920.png"/>

    Parameters
    @@ -663,7 +663,7 @@
    -

    Extract the set of functions' values for a subset of dependent variables $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$.

    +

    Extract the set of functions' values for a subset of dependent variables $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$.

    Parameters
    @@ -709,13 +709,13 @@
    [in]valuesA Vector object with the value for each component of the vector field evaluated at the point defined by the independent variable values.
    -

    Extract the Jacobian of the subset of dependent functions $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    -\[
+<p>Extract the Jacobian of the subset of dependent functions <picture><source srcset=$\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    +\[
   \mathbf{J}(\mathbf{g})
      = \frac{\partial\mathbf{g}(\mathbf{X})}{\partial\mathbf{A}}
-\] +\]" src="form_922.png"/>

    -

    The first index of the Jacobian matrix $\mathbf{J}(\mathbf{g})$ relates to the dependent variables, while the second index relates to the independent variables.

    +

    The first index of the Jacobian matrix $\mathbf{J}(\mathbf{g})$ relates to the dependent variables, while the second index relates to the independent variables.

    Parameters
    @@ -757,11 +757,11 @@
    [in]jacobianThe Jacobian of the vector function with respect to all independent variables, i.e., that returned by compute_jacobian().
    -

    Extract the Jacobian of the subset of dependent functions $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    -\[
+<p>Extract the Jacobian of the subset of dependent functions <picture><source srcset=$\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    +\[
   \mathbf{J}(\mathbf{g})
      = \frac{\partial\mathbf{g}(\mathbf{X})}{\partial\mathbf{A}}
-\] +\]" src="form_922.png"/>

    This function is a specialization of the above for rank-0 tensors (scalars). This corresponds to extracting a single entry of the Jacobian matrix because both extractors imply selection of just a single row or column of the matrix.

    @@ -798,11 +798,11 @@
    -

    Extract the Jacobian of the subset of dependent functions $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    -\[
+<p>Extract the Jacobian of the subset of dependent functions <picture><source srcset=$\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    +\[
   \mathbf{J}(\mathbf{g})
      = \frac{\partial\mathbf{g}(\mathbf{X})}{\partial\mathbf{A}}
-\] +\]" src="form_922.png"/>

    This function is a specialization of the above for rank-4 symmetric tensors.

    @@ -923,7 +923,7 @@
    -

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    +

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -1068,7 +1068,7 @@
    [in]valueA field that defines a number of independent variables. When considering taped AD numbers with branching functions, to avoid potential issues with branch switching it may be a good idea to choose these values close or equal to those that will be later evaluated and differentiated around.
    -

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    +

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -1117,7 +1117,7 @@
    [in]valueA field that defines the values of a number of independent variables.
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1159,7 +1159,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1911,7 +1911,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    +

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not then, for example, ADOL-C won't throw an error, but rather it might complain nonsensically during later computations or produce garbage results.
    @@ -1986,7 +1986,7 @@
    -

    Initialize an independent variable $X_{i}$.

    +

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -2117,7 +2117,7 @@
    [out]outAn auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.
    -

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    +

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html 2024-03-17 21:57:19.775088107 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html 2024-03-17 21:57:19.783088156 +0000 @@ -185,7 +185,7 @@

    Since time is marched forward in a discrete manner in our simulations, we need to discuss how we increment time. During time stepping we enter two separate alternating regimes in every step.

    • The snapshot stage (the current stage, the consistent stage): In this part of the algorithm, we are at $t = t_n$ and all quantities of the simulation (displacements, strains, temperatures, etc.) are up-to-date for $t = t_n$. In this stage, current time refers to $t_n$, next time refers to $t_{n+1}$, previous time refers to $t_{n-1}$. The other useful notation quantities are the next time step size $t_{n+1} - t_n$ and previous time step size $t_n - t_{n-1}$. In this stage, it is a perfect occasion to generate text output using print commands within the user's code. Additionally, post-processed outputs can be prepared here, which can then later be viewed by visualization programs such as Tecplot, Paraview, and VisIt. Additionally, during the snapshot stage, the code can assess the quality of the previous step and decide whether it wants to increase or decrease the time step size. The step size for the next time step can be modified here, by calling set_desired_next_step_size().
    • -
    • The update stage (the transition stage, the inconsistent stage): In this section of the program, the internal state of the simulation is getting updated from $t_n$ to $t_{n+1}$. All of the variables need to be updated one by one, the step number is incremented, the time is incremented by $dt = t_{n+1} - t_n$, and time-integration algorithms are used to update the other simulation quantities. In the middle of this stage, some variables have been updated to $t_{n+1}$ but other variables still represent their value at $t_n$. Thus, we call this the inconsistent stage, requiring that no post-processing output related to the state variables take place within it. The state variables, namely those related to time, the solution field and any internal variables, are not synchronized and then get updated one by one. In general, the order of updating variables is arbitrary, but some care should be taken if there are interdependencies between them. For example, if some variable such as $x$ depends on the calculation of another variable such as $y$, then $y$ must be updated before $x$ can be updated.

      +
    • The update stage (the transition stage, the inconsistent stage): In this section of the program, the internal state of the simulation is getting updated from $t_n$ to $t_{n+1}$. All of the variables need to be updated one by one, the step number is incremented, the time is incremented by $dt = t_{n+1} - t_n$, and time-integration algorithms are used to update the other simulation quantities. In the middle of this stage, some variables have been updated to $t_{n+1}$ but other variables still represent their value at $t_n$. Thus, we call this the inconsistent stage, requiring that no post-processing output related to the state variables take place within it. The state variables, namely those related to time, the solution field and any internal variables, are not synchronized and then get updated one by one. In general, the order of updating variables is arbitrary, but some care should be taken if there are interdependencies between them. For example, if some variable such as $x$ depends on the calculation of another variable such as $y$, then $y$ must be updated before $x$ can be updated.

      The question arises whether time should be incremented before updating state quantities. Multiple possibilities exist, depending on program and formulation requirements, and possibly the programmer's preferences:

      • Time is incremented before the rest of the updates. In this case, even though time is incremented to $t_{n+1}$, not all variables are updated yet. During this update phase, $dt$ equals the previous time step size. Previous means that it is referring to the $dt$ of the advance_time() command that was performed previously. In the following example code, we are assuming that a and b are two state variables that need to be updated in this time step.
        time.advance_time();
        new_a = update_a(a, b, time.get_previous_step_size());
        /usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 2024-03-17 21:57:19.859088625 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 2024-03-17 21:57:19.863088650 +0000 @@ -417,7 +417,7 @@
    [in]indexThe index of the entry in the global list of dependent variables that this function belongs to.

    Detailed Description

    template<int dim, int spacedim = dim>
    -class DoFHandler< dim, spacedim >

    Given a triangulation and a description of a finite element, this class enumerates degrees of freedom on all vertices, edges, faces, and cells of the triangulation. As a result, it also provides a basis for a discrete space $V_h$ whose elements are finite element functions defined on each cell by a FiniteElement object. This class satisfies the MeshType concept requirements.

    +class DoFHandler< dim, spacedim >

    Given a triangulation and a description of a finite element, this class enumerates degrees of freedom on all vertices, edges, faces, and cells of the triangulation. As a result, it also provides a basis for a discrete space $V_h$ whose elements are finite element functions defined on each cell by a FiniteElement object. This class satisfies the MeshType concept requirements.

    It is first used in the step-2 tutorial program.

    For each 0d, 1d, 2d, and 3d subobject, this class stores a list of the indices of degrees of freedom defined on this DoFHandler. These indices refer to the unconstrained degrees of freedom, i.e. constrained degrees of freedom are numbered in the same way as unconstrained ones, and are only later eliminated. This leads to the fact that indices in global vectors and matrices also refer to all degrees of freedom and some kind of condensation is needed to restrict the systems of equations to the unconstrained degrees of freedom only. The actual layout of storage of the indices is described in the internal::DoFHandlerImplementation::DoFLevel class documentation.

    The class offers iterators to traverse all cells, in much the same way as the Triangulation class does. Using the begin() and end() functions (and companions, like begin_active()), one can obtain iterators to walk over cells, and query the degree of freedom structures as well as the triangulation data. These iterators are built on top of those of the Triangulation class, but offer the additional information on degrees of freedom functionality compared to pure triangulation iterators. The order in which dof iterators are presented by the ++ and -- operators is the same as that for the corresponding iterators traversing the triangulation on which this DoFHandler is constructed.

    @@ -434,7 +434,7 @@

    Like many other classes in deal.II, the DoFHandler class can stream its contents to an archive using BOOST's serialization facilities. The data so stored can later be retrieved again from the archive to restore the contents of this object. This facility is frequently used to save the state of a program to disk for possible later resurrection, often in the context of checkpoint/restart strategies for long running computations or on computers that aren't very reliable (e.g. on very large clusters where individual nodes occasionally fail and then bring down an entire MPI job).

    The model for doing so is similar for the DoFHandler class as it is for the Triangulation class (see the section in the general documentation of that class). In particular, the load() function does not exactly restore the same state as was stored previously using the save() function. Rather, the function assumes that you load data into a DoFHandler object that is already associated with a triangulation that has a content that matches the one that was used when the data was saved. Likewise, the load() function assumes that the current object is already associated with a finite element object that matches the one that was associated with it when data was saved; the latter can be achieved by calling DoFHandler::distribute_dofs() using the same kind of finite element before re-loading data from the serialization archive.

    hp-adaptive finite element methods

    -

    Instead of only using one particular FiniteElement on all cells, this class also allows for an enumeration of degrees of freedom on different finite elements on every cells. To this end, one assigns an active_fe_index to every cell that indicates which element within a collection of finite elements (represented by an object of type hp::FECollection) is the one that lives on this cell. The class then enumerates the degree of freedom associated with these finite elements on each cell of a triangulation and, if possible, identifies degrees of freedom at the interfaces of cells if they match. If neighboring cells have degrees of freedom along the common interface that do not immediate match (for example, if you have $Q_2$ and $Q_3$ elements meeting at a common face), then one needs to compute constraints to ensure that the resulting finite element space on the mesh remains conforming.

    +

    Instead of only using one particular FiniteElement on all cells, this class also allows for an enumeration of degrees of freedom on different finite elements on every cells. To this end, one assigns an active_fe_index to every cell that indicates which element within a collection of finite elements (represented by an object of type hp::FECollection) is the one that lives on this cell. The class then enumerates the degree of freedom associated with these finite elements on each cell of a triangulation and, if possible, identifies degrees of freedom at the interfaces of cells if they match. If neighboring cells have degrees of freedom along the common interface that do not immediate match (for example, if you have $Q_2$ and $Q_3$ elements meeting at a common face), then one needs to compute constraints to ensure that the resulting finite element space on the mesh remains conforming.

    The whole process of working with objects of this type is explained in step-27. Many of the algorithms this class implements are described in the hp-paper.

    Active FE indices and their behavior under mesh refinement

    The typical workflow for using this class is to create a mesh, assign an active FE index to every active cell, call DoFHandler::distribute_dofs(), and then assemble a linear system and solve a problem on this finite element space.

    @@ -983,7 +983,7 @@
    -

    Go through the triangulation and "distribute" the degrees of freedom needed for the given finite element. "Distributing" degrees of freedom involves allocating memory to store the indices on all entities on which degrees of freedom can be located (e.g., vertices, edges, faces, etc.) and to then enumerate all degrees of freedom. In other words, while the mesh and the finite element object by themselves simply define a finite element space $V_h$, the process of distributing degrees of freedom makes sure that there is a basis for this space and that the shape functions of this basis are enumerated in an indexable, predictable way.

    +

    Go through the triangulation and "distribute" the degrees of freedom needed for the given finite element. "Distributing" degrees of freedom involves allocating memory to store the indices on all entities on which degrees of freedom can be located (e.g., vertices, edges, faces, etc.) and to then enumerate all degrees of freedom. In other words, while the mesh and the finite element object by themselves simply define a finite element space $V_h$, the process of distributing degrees of freedom makes sure that there is a basis for this space and that the shape functions of this basis are enumerated in an indexable, predictable way.

    The exact order in which degrees of freedom on a mesh are ordered, i.e., the order in which basis functions of the finite element space are enumerated, is something that deal.II treats as an implementation detail. By and large, degrees of freedom are enumerated in the same order in which we traverse cells, but you should not rely on any specific numbering. In contrast, if you want a particular ordering, use the functions in namespace DoFRenumbering.

    This function is first discussed in the introduction to the step-2 tutorial program.

    Note
    This function makes a copy of the finite element given as argument, and stores it as a member variable, similarly to the above function set_fe().
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 2024-03-17 21:57:19.911088946 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 2024-03-17 21:57:19.915088971 +0000 @@ -1106,7 +1106,7 @@
    -

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix.

    +

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix.

    Definition at line 567 of file dynamic_sparsity_pattern.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html 2024-03-17 21:57:19.947089169 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html 2024-03-17 21:57:19.955089218 +0000 @@ -191,7 +191,7 @@
    template<typename VectorType = Vector<double>>
    class EigenInverse< VectorType >

    Inverse iteration (Wielandt) for eigenvalue computations.

    This class implements an adaptive version of the inverse iteration by Wielandt.

    -

    There are two choices for the stopping criterion: by default, the norm of the residual $A x - l x$ is computed. Since this might not converge to zero for non-symmetric matrices with non-trivial Jordan blocks, it can be replaced by checking the difference of successive eigenvalues. Use AdditionalData::use_residual for switching this option.

    +

    There are two choices for the stopping criterion: by default, the norm of the residual $A x - l x$ is computed. Since this might not converge to zero for non-symmetric matrices with non-trivial Jordan blocks, it can be replaced by checking the difference of successive eigenvalues. Use AdditionalData::use_residual for switching this option.

    Usually, the initial guess entering this method is updated after each step, replacing it with the new approximation of the eigenvalue. Using a parameter AdditionalData::relaxation between 0 and 1, this update can be damped. With relaxation parameter 0, no update is performed. This damping allows for slower adaptation of the shift value to make sure that the method converges to the eigenvalue closest to the initial guess. This can be aided by the parameter AdditionalData::start_adaption, which indicates the first iteration step in which the shift value should be adapted.

    Definition at line 129 of file eigen.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 2024-03-17 21:57:19.987089416 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 2024-03-17 21:57:19.995089466 +0000 @@ -190,7 +190,7 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
    class EigenPower< VectorType >

    Power method (von Mises) for eigenvalue computations.

    -

    This method determines the largest eigenvalue of a matrix by applying increasing powers of this matrix to a vector. If there is an eigenvalue $l$ with dominant absolute value, the iteration vectors will become aligned to its eigenspace and $Ax = lx$.

    +

    This method determines the largest eigenvalue of a matrix by applying increasing powers of this matrix to a vector. If there is an eigenvalue $l$ with dominant absolute value, the iteration vectors will become aligned to its eigenspace and $Ax = lx$.

    A shift parameter allows shifting the spectrum, so it is possible to compute the smallest eigenvalue, too.

    Convergence of this method is known to be slow.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 2024-03-17 21:57:20.051089811 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 2024-03-17 21:57:20.055089837 +0000 @@ -223,16 +223,16 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    class EllipticalManifold< dim, spacedim >

    Elliptical manifold description derived from ChartManifold. More information on the elliptical coordinate system can be found at Wikipedia .

    -

    This is based on the definition of elliptic coordinates $(u,v)$

\[
  \left\lbrace\begin{aligned}
  x &=  x_0 + c \cosh(u) \cos(v) \\
  y &=  y_0 + c \sinh(u) \sin(v)
  \end{aligned}\right.
\]

    -

    in which $(x_0,y_0)$ are coordinates of the center of the cartesian system.

    -

    The current implementation uses coordinates $(c,v)$, instead of $(u,v)$, and fixes $u$ according to a given eccentricity. Therefore, this choice of coordinates generates an elliptical manifold characterized by a constant eccentricity: $e=\frac{1}{\cosh(u)}$, with $e\in\left]0,1\right[$.

    +

    in which $(x_0,y_0)$ are coordinates of the center of the cartesian system.

    +

    The current implementation uses coordinates $(c,v)$, instead of $(u,v)$, and fixes $u$ according to a given eccentricity. Therefore, this choice of coordinates generates an elliptical manifold characterized by a constant eccentricity: $e=\frac{1}{\cosh(u)}$, with $e\in\left]0,1\right[$.

    The constructor of this class will throw an exception if both dim and spacedim are different from two.

    This manifold can be used to produce hyper_shells with elliptical curvature. As an example, the test elliptical_manifold_01 produces the following triangulation:

    @@ -348,7 +348,7 @@ - +
    centerCenter of the manifold.
    major_axis_directionDirection of the major axis of the manifold.
    eccentricityEccentricity of the manifold $e\in\left]0,1\right[$.
    eccentricityEccentricity of the manifold $e\in\left]0,1\right[$.
    @@ -485,7 +485,7 @@

    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -520,7 +520,7 @@

    Return the periodicity associated with the submanifold.

    -

    For $\text{dim}=2$ and $\text{spacedim}=2$, the first coordinate is non-periodic, while the second coordinate has a periodicity of $2\pi$.

    +

    For $\text{dim}=2$ and $\text{spacedim}=2$, the first coordinate is non-periodic, while the second coordinate has a periodicity of $2\pi$.

    Definition at line 1244 of file manifold_lib.cc.

    @@ -748,7 +748,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -782,24 +782,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
\end{align*}

    In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right]\right)\right|_{t=0}
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
\end{align*}

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 2024-03-17 21:57:20.187090651 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 2024-03-17 21:57:20.183090626 +0000 @@ -459,7 +459,7 @@
    unsigned int cell_index

    Likewise, a gradient of the finite element solution represented by vector can be interpolated to the quadrature points by fe_eval.get_gradient(q). The combination of read_dof_values(), evaluate() and get_value() is similar to what FEValues::get_function_values or FEValues::get_function_gradients does, but it is in general much faster because it makes use of the tensor product, see the description of the evaluation routines below, and can do this operation for several cells at once through vectorization.

    -

    The second class of tasks done by FEEvaluation are integration tasks for right hand sides. In finite element computations, these typically consist of multiplying a quantity on quadrature points (a function value, or a field interpolated by the finite element space itself) by a set of test functions and integrating over the cell through summation of the values in each quadrature point, multiplied by the quadrature weight and the Jacobian determinant of the transformation. If a generic Function object is given and we want to compute $v_i = \int_\Omega \varphi_i f dx$, this is done by the following cell-wise integration:

    +

    The second class of tasks done by FEEvaluation are integration tasks for right hand sides. In finite element computations, these typically consist of multiplying a quantity on quadrature points (a function value, or a field interpolated by the finite element space itself) by a set of test functions and integrating over the cell through summation of the values in each quadrature point, multiplied by the quadrature weight and the Jacobian determinant of the transformation. If a generic Function object is given and we want to compute $v_i = \int_\Omega \varphi_i f dx$, this is done by the following cell-wise integration:

    Function<dim> &function = ...;
    for (unsigned int cell_index = cell_range.first;
    @@ -1944,8 +1944,8 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$

    +

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
    @@ -2209,7 +2209,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).
    @@ -2659,8 +2659,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess.html 2024-03-17 21:57:20.283091244 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess.html 2024-03-17 21:57:20.291091294 +0000 @@ -1154,8 +1154,8 @@
    -

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$

    +

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
    @@ -1419,7 +1419,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).
    @@ -1869,8 +1869,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2024-03-17 21:57:20.391091912 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2024-03-17 21:57:20.399091961 +0000 @@ -940,8 +940,8 @@

    -

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$

    +

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
    @@ -1567,7 +1567,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).
    @@ -1966,8 +1966,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2024-03-17 21:57:20.499092579 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2024-03-17 21:57:20.507092628 +0000 @@ -914,8 +914,8 @@

    -

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$

    +

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
    @@ -1494,7 +1494,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).
    @@ -1893,8 +1893,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2024-03-17 21:57:20.607093246 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2024-03-17 21:57:20.615093295 +0000 @@ -860,7 +860,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    @@ -1339,8 +1339,8 @@
    -

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$

    +

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
    @@ -1790,8 +1790,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html 2024-03-17 21:57:20.715093913 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html 2024-03-17 21:57:20.719093938 +0000 @@ -1053,8 +1053,8 @@
    -

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$

    +

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
    @@ -1249,7 +1249,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).
    @@ -1690,8 +1690,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 2024-03-17 21:57:20.795094407 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 2024-03-17 21:57:20.799094433 +0000 @@ -768,8 +768,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 2024-03-17 21:57:20.911095124 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 2024-03-17 21:57:20.907095099 +0000 @@ -1579,8 +1579,8 @@
    -

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$

    +

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
    @@ -1844,7 +1844,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).
    @@ -2294,8 +2294,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 2024-03-17 21:57:21.015095767 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 2024-03-17 21:57:21.019095791 +0000 @@ -931,7 +931,7 @@

    If the shape function is vector-valued, then this returns the only non- zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_pointNumber of the quadrature point at which function is to be evaluated
    @@ -972,7 +972,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    componentvector component to be evaluated.
    @@ -1011,7 +1011,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    @@ -1213,17 +1213,17 @@
    -

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    +

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    -
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1259,7 +1259,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.

    @@ -1431,16 +1431,16 @@
    -

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    +

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    +
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1476,7 +1476,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1595,11 +1595,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    +
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1640,7 +1640,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -1759,11 +1759,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    -
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1801,7 +1801,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1964,11 +1964,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    +
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2009,7 +2009,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector- valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.

    @@ -2350,7 +2350,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    -

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    +

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2407,7 +2407,7 @@
    -

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    +

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2465,7 +2465,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2523,7 +2523,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2581,7 +2581,7 @@
    -

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    +

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html 2024-03-17 21:57:21.127096458 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html 2024-03-17 21:57:21.127096458 +0000 @@ -649,7 +649,7 @@

    If the shape function is vector-valued, then this returns the only non- zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_pointNumber of the quadrature point at which function is to be evaluated
    @@ -690,7 +690,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    componentvector component to be evaluated.
    @@ -729,7 +729,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    @@ -931,17 +931,17 @@
    -

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    +

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    -
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -977,7 +977,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.

    @@ -1149,16 +1149,16 @@
    -

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    +

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    +
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1194,7 +1194,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1313,11 +1313,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    +
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1358,7 +1358,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -1477,11 +1477,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    -
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1519,7 +1519,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1682,11 +1682,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    +
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1727,7 +1727,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector- valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.

    @@ -2068,7 +2068,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    -

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    +

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2125,7 +2125,7 @@
    -

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    +

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2183,7 +2183,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2241,7 +2241,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2299,7 +2299,7 @@
    -

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    +

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 2024-03-17 21:57:21.199096904 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 2024-03-17 21:57:21.207096952 +0000 @@ -488,8 +488,8 @@
  • If the q_index and mapping_index arguments to this function are explicitly specified (rather than leaving them at their default values), then these indices will be used to select which element of the hp::QCollection and hp::MappingCollection passed to the constructor should serve as the quadrature and mapping to be used.
  • If one of these arguments is left at its default value, then the function will need to choose a quadrature and/or mapping that is appropriate for the two finite element spaces used on the two cells adjacent to the current interface. As the first choice, if the quadrature or mapping collection we are considering has only one element, then that is clearly the one that should be used.
  • If the quadrature or mapping collection have multiple elements, then we need to dig further. For quadrature objects, we can compare whether the two quadrature objects that correspond to the active_fe_index values of the two adjacent cells are identical (i.e., have quadrature points at the same locations, and have the same weights). If this is so, then it does not matter which one of the two we take, and we choose one or the other.
  • -
  • If this has still not helped, we try to find out which of the two finite element spaces on the two adjacent cells is "larger" (say, if you had used $Q_2$ and $Q_4$ elements on the two adjacent cells, then the $Q_4$ element is the larger one); the determination of which space is "larger" is made using the hp::FECollection::find_dominated_fe() function, which is not necessarily intended for this kind of query, but yields a result that serves just fine for our purposes here. We then operate on the assumption that the quadrature object associated with the "larger" of the two spaces is the appropriate one to use for the face that separates these two spaces.
      -
    • If this function returns that one of the two elements in question is dominated by the other, then presumably it is "larger" one and we take the quadrature formula and mapping that corresponds to this "larger" element is. For example, for the $Q_2$ element mentioned above, one would generally use a QGauss(3) quadrature formula, whereas for the $Q_4$ element, one would use QGauss(5). To integrate jump and average terms on the interface between cells using these two elements, QGauss(5) is appropriate. Because, typically, people will order elements in the hp::FECollection in the same order as the quadrature and mapping objects in hp::QCollection and hp::MappingCollection, this function will use the index of the "larger" element in the hp::FECollection to also index into the hp::QCollection and hp::MappingCollection to retrieve quadrature and mapping objects appropriate for the current face.
    • +
    • If this has still not helped, we try to find out which of the two finite element spaces on the two adjacent cells is "larger" (say, if you had used $Q_2$ and $Q_4$ elements on the two adjacent cells, then the $Q_4$ element is the larger one); the determination of which space is "larger" is made using the hp::FECollection::find_dominated_fe() function, which is not necessarily intended for this kind of query, but yields a result that serves just fine for our purposes here. We then operate on the assumption that the quadrature object associated with the "larger" of the two spaces is the appropriate one to use for the face that separates these two spaces.
        +
      • If this function returns that one of the two elements in question is dominated by the other, then presumably it is "larger" one and we take the quadrature formula and mapping that corresponds to this "larger" element is. For example, for the $Q_2$ element mentioned above, one would generally use a QGauss(3) quadrature formula, whereas for the $Q_4$ element, one would use QGauss(5). To integrate jump and average terms on the interface between cells using these two elements, QGauss(5) is appropriate. Because, typically, people will order elements in the hp::FECollection in the same order as the quadrature and mapping objects in hp::QCollection and hp::MappingCollection, this function will use the index of the "larger" element in the hp::FECollection to also index into the hp::QCollection and hp::MappingCollection to retrieve quadrature and mapping objects appropriate for the current face.
      • There are cases where neither element dominates the other. For example, if one uses $Q_2\times Q_1$ and $Q_1\times Q_2$ elements on neighboring cells, neither of the two spaces dominates the other – or, in the context of the current function, neither space is "larger" than the other. In that case, there is no way for the current function to determine quadrature and mapping objects associated with the two elements are the appropriate ones. If that happens, you will get an error – and the only way to avoid the error is to explicitly specify for these interfaces which quadrature and mapping objects you want to use, by providing non-default values for the q_index and mapping_index arguments to this function.
    • @@ -825,7 +825,7 @@
  • Mapped quadrature weight. This value equals the mapped surface element times the weight of the quadrature point.

    -

    You can think of the quantity returned by this function as the surface element $ds$ in the integral that we implement here by quadrature.

    +

    You can think of the quantity returned by this function as the surface element $ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1097,9 +1097,9 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

    Return the jump $\jump{u}=u_{\text{cell0}} - u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    +

    Return the jump $\jump{u}=u_{\text{cell0}} - u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    Note that one can define the jump in different ways (the value "there" minus the value "here", or the other way around; both are used in the finite element literature). The definition here uses "value here minus value there", as seen from the first cell.

    -

    If this is a boundary face (at_boundary() returns true), then $\jump{u}=u_{\text{cell0}}$, that is "the value here (minus zero)".

    +

    If this is a boundary face (at_boundary() returns true), then $\jump{u}=u_{\text{cell0}}$, that is "the value here (minus zero)".

    Note
    The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
    @@ -1155,9 +1155,9 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}} -
-\nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    -

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla u}=\nabla u_{\text{cell0}}$.

    +

    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}} -
+\nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    +

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla u}=\nabla u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
    @@ -1213,9 +1213,9 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

    Return the jump in the Hessian $\jump{\nabla^2 u} = \nabla^2
-u_{\text{cell0}} - \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    -

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^2 u} = \nabla^2 u_{\text{cell0}}$.

    +

    Return the jump in the Hessian $\jump{\nabla^2 u} = \nabla^2
+u_{\text{cell0}} - \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    +

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^2 u} = \nabla^2 u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the derivative) of the shape function (singular)".
    @@ -1271,9 +1271,9 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
-u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    -

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}}$.

    +

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
+u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    +

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the derivative) of the shape function (singular)".
    @@ -1329,9 +1329,9 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

    Return the average $\average{u}=\frac{1}{2}u_{\text{cell0}} +
-\frac{1}{2}u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    -

    If this is a boundary face (at_boundary() returns true), then $\average{u}=u_{\text{cell0}}$.

    +

    Return the average $\average{u}=\frac{1}{2}u_{\text{cell0}} +
+\frac{1}{2}u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    +

    If this is a boundary face (at_boundary() returns true), then $\average{u}=u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
    @@ -1387,9 +1387,9 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

    Return the average of the gradient $\average{\nabla u} = \frac{1}{2}\nabla
-u_{\text{cell0}} + \frac{1}{2} \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    -

    If this is a boundary face (at_boundary() returns true), then $\average{\nabla u}=\nabla u_{\text{cell0}}$.

    +

    Return the average of the gradient $\average{\nabla u} = \frac{1}{2}\nabla
+u_{\text{cell0}} + \frac{1}{2} \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    +

    If this is a boundary face (at_boundary() returns true), then $\average{\nabla u}=\nabla u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values for the gradient) of the shape function (singular)".
    @@ -1445,10 +1445,10 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

    Return the average of the Hessian $\average{\nabla^2 u} =
+<p>Return the average of the Hessian <picture><source srcset=$\average{\nabla^2 u} =
 \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2
-u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    -

    If this is a boundary face (at_boundary() returns true), then $\average{\nabla^2 u}=\nabla^2 u_{\text{cell0}}$.

    +u_{\text{cell1}}$" src="form_1094.png"/> on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    +

    If this is a boundary face (at_boundary() returns true), then $\average{\nabla^2 u}=\nabla^2 u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the average (singular) of the Hessians (plural: one or two possible values for the second derivatives) of the shape function (singular)".
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 2024-03-17 21:57:21.263097299 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 2024-03-17 21:57:21.271097347 +0000 @@ -454,7 +454,7 @@ const unsigned int q_point&#href_anchor"memdoc"> -

    Return the jump $\jump{u}=u_1 - u_2$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    +

    Return the jump $\jump{u}=u_1 - u_2$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
    @@ -500,7 +500,7 @@ const unsigned int q_point&#href_anchor"memdoc"> -

    Return the jump of the gradient $\jump{nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    +

    Return the jump of the gradient $\jump{nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
    @@ -546,8 +546,8 @@ const unsigned int q_point&#href_anchor"memdoc"> -

    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}}
-- \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    +

    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}}
+- \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the second derivative) of the shape function (singular)".
    @@ -593,8 +593,8 @@ const unsigned int q_point&#href_anchor"memdoc"> -

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
-u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    +

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
+u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the third derivative) of the shape function (singular)".
    @@ -640,7 +640,7 @@ const unsigned int q_point&#href_anchor"memdoc"> -

    Return the average value $\average{u}=\frac{1}{2}(u_1 + u_2)$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    +

    Return the average value $\average{u}=\frac{1}{2}(u_1 + u_2)$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
    @@ -708,7 +708,7 @@ const unsigned int q_point&#href_anchor"memdoc"> -

    Return the average of the gradient $\average{\nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    +

    Return the average of the gradient $\average{\nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values of the derivative) of the shape function (singular)".
    @@ -754,9 +754,9 @@ const unsigned int q_point&#href_anchor"memdoc"> -

    Return the average of the Hessian $\average{\nabla^2 u} =
+<p>Return the average of the Hessian <picture><source srcset=$\average{\nabla^2 u} =
 \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2
-u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    +u_{\text{cell1}}$" src="form_1094.png"/> on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) in the Hessians (plural: one or two possible values of the second derivative) of the shape function (singular)".
    @@ -811,7 +811,7 @@ std::vector< solution_value_type< typename InputVector::value_type > > & values&#href_anchor"memdoc">

    Return the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The argument here_or_there selects between the value on cell 0 (here, true) and cell 1 (there, false). You can also interpret it as "upstream" (true) and "downstream" (false) as defined by the direction of the normal vector in this quadrature point. If here_or_there is true, the values from the first cell of the interface is used.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -878,7 +878,7 @@ std::vector< solution_value_type< typename InputVector::value_type > > & values&#href_anchor"memdoc">

    Return the jump in the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -926,7 +926,7 @@ std::vector< solution_gradient_type< typename InputVector::value_type > > & gradients&#href_anchor"memdoc">

    Return the jump in the gradients of the selected scalar components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -974,7 +974,7 @@ std::vector< solution_hessian_type< typename InputVector::value_type > > & hessians&#href_anchor"memdoc">

    Return the jump in the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1022,7 +1022,7 @@ std::vector< solution_third_derivative_type< typename InputVector::value_type > > & third_derivatives&#href_anchor"memdoc">

    Return the jump in the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1070,7 +1070,7 @@ std::vector< solution_value_type< typename InputVector::value_type > > & values&#href_anchor"memdoc">

    Return the average of the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1118,7 +1118,7 @@ std::vector< solution_gradient_type< typename InputVector::value_type > > & gradients&#href_anchor"memdoc">

    Return the average of the gradients of the selected scalar components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1166,7 +1166,7 @@ std::vector< solution_hessian_type< typename InputVector::value_type > > & hessians&#href_anchor"memdoc">

    Return the average of the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 2024-03-17 21:57:21.339097768 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 2024-03-17 21:57:21.339097768 +0000 @@ -455,7 +455,7 @@ const unsigned int q_point&#href_anchor"memdoc"> -

    Return the jump vector $[\mathbf{u}]=\mathbf{u_1} - \mathbf{u_2}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    +

    Return the jump vector $[\mathbf{u}]=\mathbf{u_1} - \mathbf{u_2}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
    @@ -501,8 +501,8 @@ const unsigned int q_point&#href_anchor"memdoc"> -

    Return the jump of the gradient (a tensor of rank 2) $\jump{\nabla
-\mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    +

    Return the jump of the gradient (a tensor of rank 2) $\jump{\nabla
+\mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
    @@ -548,8 +548,8 @@ const unsigned int q_point&#href_anchor"memdoc"> -

    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}}
-- \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    +

    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}}
+- \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the second derivative) of the shape function (singular)".
    @@ -595,8 +595,8 @@ const unsigned int q_point&#href_anchor"memdoc"> -

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
-u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    +

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
+u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the third derivative) of the shape function (singular)".
    @@ -642,8 +642,8 @@ const unsigned int q_point&#href_anchor"memdoc"> -

    Return the average vector $\average{\mathbf{u}}=\frac{1}{2}(\mathbf{u_1}
-+ \mathbf{u_2})$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    +

    Return the average vector $\average{\mathbf{u}}=\frac{1}{2}(\mathbf{u_1}
++ \mathbf{u_2})$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
    @@ -689,8 +689,8 @@ const unsigned int q_point&#href_anchor"memdoc"> -

    Return the average of the gradient (a tensor of rank 2) $\average{\nabla
-\mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    +

    Return the average of the gradient (a tensor of rank 2) $\average{\nabla
+\mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values of the derivative) of the shape function (singular)".
    @@ -736,9 +736,9 @@ const unsigned int q_point&#href_anchor"memdoc"> -

    Return the average of the Hessian $\average{\nabla^2 u} =
+<p>Return the average of the Hessian <picture><source srcset=$\average{\nabla^2 u} =
 \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2
-u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    +u_{\text{cell1}}$" src="form_1094.png"/> on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) in the Hessians (plural: one or two possible values of the second derivative) of the shape function (singular)".
    @@ -796,7 +796,7 @@

    Return the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The argument here_or_there selects between the value on cell 0 (here, true) and cell 1 (there, false). You can also interpret it as "upstream" (true) and "downstream" (false) as defined by the direction of the normal vector in this quadrature point. If here_or_there is true, the values from the first cell of the interface is used.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -869,7 +869,7 @@

    Return the jump in the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -923,7 +923,7 @@

    Return the jump in the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -977,7 +977,7 @@

    Return the jump in the Hessians of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1031,7 +1031,7 @@

    Return the jump in the third derivatives of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1085,7 +1085,7 @@

    Return the average of the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1139,7 +1139,7 @@

    Return the average of the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1193,7 +1193,7 @@

    Return the average of the Hessians of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 2024-03-17 21:57:21.375097990 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 2024-03-17 21:57:21.383098040 +0000 @@ -199,25 +199,25 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    -class FESeries::Fourier< dim, spacedim >

    A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into Fourier series on a reference element. The exponential form of the Fourier series is based on completeness and Hermitian orthogonality of the set of exponential functions $ \phi_{\bf k}({\bf x}) = \exp(2 \pi i\, {\bf k} \cdot {\bf x})$. For example in 1d the L2-orthogonality condition reads

    -\[
+class FESeries::Fourier< dim, spacedim ></div><p>A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into <a class=Fourier series on a reference element. The exponential form of the Fourier series is based on completeness and Hermitian orthogonality of the set of exponential functions $ \phi_{\bf k}({\bf x}) = \exp(2 \pi i\, {\bf k} \cdot {\bf x})$. For example in 1d the L2-orthogonality condition reads

    +\[
   \int_0^1 \phi_k(x) \phi_l^\ast(x) dx=\delta_{kl}.
-\] +\]" src="form_1176.png"/>

    -

    Note that $ \phi_{\bf k} = \phi_{-\bf k}^\ast $.

    +

    Note that $ \phi_{\bf k} = \phi_{-\bf k}^\ast $.

    The arbitrary scalar FE field on the reference element can be expanded in the complete orthogonal exponential basis as

    -\[
+<picture><source srcset=\[
    u({\bf x})
    = \sum_{\bf k} c_{\bf k} \phi_{\bf k}({\bf x}).
-\] +\]" src="form_1178.png"/>

    From the orthogonality property of the basis, it follows that

    -\[
+<picture><source srcset=\[
    c_{\bf k} =
    \int_{[0,1]^d} u({\bf x}) \phi_{\bf k}^\ast ({\bf x}) d{\bf x}\,.
-\] +\]" src="form_1179.png"/>

    -

    It is this complex-valued expansion coefficients, that are calculated by this class. Note that $ u({\bf x}) = \sum_i u_i N_i({\bf x})$, where $ N_i({\bf x}) $ are real-valued FiniteElement shape functions. Consequently $ c_{\bf k} \equiv c_{-\bf k}^\ast $ and we only need to compute $ c_{\bf k} $ for positive indices $ \bf k $ .

    +

    It is this complex-valued expansion coefficients, that are calculated by this class. Note that $ u({\bf x}) = \sum_i u_i N_i({\bf x})$, where $ N_i({\bf x}) $ are real-valued FiniteElement shape functions. Consequently $ c_{\bf k} \equiv c_{-\bf k}^\ast $ and we only need to compute $ c_{\bf k} $ for positive indices $ \bf k $ .

    Definition at line 90 of file fe_series.h.

    Member Typedef Documentation

    @@ -822,7 +822,7 @@
    -

    Angular frequencies $ 2 \pi {\bf k} $ .

    +

    Angular frequencies $ 2 \pi {\bf k} $ .

    Definition at line 196 of file fe_series.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 2024-03-17 21:57:21.427098311 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 2024-03-17 21:57:21.423098286 +0000 @@ -196,39 +196,39 @@
    template<int dim, int spacedim = dim>
    class FESeries::Legendre< dim, spacedim >

    A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into series of Legendre functions on a reference element.

    Legendre functions are solutions to Legendre's differential equation

    -\[
+<picture><source srcset=\[
    \frac{d}{dx}\left([1-x^2] \frac{d}{dx} P_n(x)\right) +
    n[n+1] P_n(x) = 0
-\] +\]" src="form_1185.png"/>

    and can be expressed using Rodrigues' formula

    -\[
+<picture><source srcset=\[
    P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n}[x^2-1]^n.
-\] +\]" src="form_1186.png"/>

    -

    These polynomials are orthogonal with respect to the $ L^2 $ inner product on the interval $ [-1;1] $

    -\[
+<p> These polynomials are orthogonal with respect to the <picture><source srcset=$ L^2 $ inner product on the interval $ [-1;1] $

    +\[
    \int_{-1}^1 P_m(x) P_n(x) = \frac{2}{2n + 1} \delta_{mn}
-\] +\]" src="form_1189.png"/>

    -

    and are complete. A family of $ L^2 $-orthogonal polynomials on $ [0;1] $ can be constructed via

    -\[
+<p> and are complete. A family of <picture><source srcset=$ L^2 $-orthogonal polynomials on $ [0;1] $ can be constructed via

    +\[
    \widetilde P_m = \sqrt{2} P_m(2x-1).
-\] +\]" src="form_1191.png"/>

    -

    An arbitrary scalar FE field on the reference element $ [0;1] $ can be expanded in the complete orthogonal basis as

    -\[
+<p>An arbitrary scalar FE field on the reference element <picture><source srcset=$ [0;1] $ can be expanded in the complete orthogonal basis as

    +\[
    u(x)
    = \sum_{m} c_m \widetilde P_{m}(x).
-\] +\]" src="form_1192.png"/>

    From the orthogonality property of the basis, it follows that

    -\[
+<picture><source srcset=\[
    c_m = \frac{2m+1}{2}
    \int_0^1 u(x) \widetilde P_m(x) dx .
-\] +\]" src="form_1193.png"/>

    -

    This class calculates coefficients $ c_{\bf k} $ using $ dim $-dimensional Legendre polynomials constructed from $ \widetilde P_m(x) $ using tensor product rule.

    +

    This class calculates coefficients $ c_{\bf k} $ using $ dim $-dimensional Legendre polynomials constructed from $ \widetilde P_m(x) $ using tensor product rule.

    Definition at line 260 of file fe_series.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 2024-03-17 21:57:21.531098954 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 2024-03-17 21:57:21.531098954 +0000 @@ -959,7 +959,7 @@

    If the shape function is vector-valued, then this returns the only non- zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_pointNumber of the quadrature point at which function is to be evaluated
    @@ -1000,7 +1000,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    componentvector component to be evaluated.
    @@ -1039,7 +1039,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    @@ -1241,17 +1241,17 @@
    -

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    +

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    -
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1287,7 +1287,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.

    @@ -1459,16 +1459,16 @@
    -

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    +

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    +
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1504,7 +1504,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1623,11 +1623,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    +
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1668,7 +1668,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -1787,11 +1787,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    -
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1829,7 +1829,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1992,11 +1992,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    +
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2037,7 +2037,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.

    @@ -2378,7 +2378,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    -

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    +

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2435,7 +2435,7 @@
    -

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    +

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2493,7 +2493,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2551,7 +2551,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2609,7 +2609,7 @@
    -

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    +

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 2024-03-17 21:57:21.687099917 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 2024-03-17 21:57:21.691099942 +0000 @@ -500,11 +500,11 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    -class FESystem< dim, spacedim >

    This class provides an interface to group several elements together into one, vector-valued element. As example, consider the Taylor-Hood element that is used for the solution of the Stokes and Navier-Stokes equations: There, the velocity (of which there are as many components as the dimension $d$ of the domain) is discretized with $Q_2$ elements and the pressure with $Q_1$ elements. Mathematically, the finite element space for the coupled problem is then often written as $V_h = Q_2^d \times Q_1$ where the exponentiation is understood to be the tensor product of spaces – i.e., in 2d, we have $V_h=Q_2\times Q_2\times Q_1$ – and tensor products lead to vectors where each component of the vector-valued function space corresponds to a scalar function in one of the $Q_2$ or $Q_1$ spaces. Using the FESystem class, this space is created using

    FESystem<dim> taylor_hood_fe (FE_Q<dim>(2)^dim, // velocity components
    +class FESystem< dim, spacedim >

    This class provides an interface to group several elements together into one, vector-valued element. As example, consider the Taylor-Hood element that is used for the solution of the Stokes and Navier-Stokes equations: There, the velocity (of which there are as many components as the dimension $d$ of the domain) is discretized with $Q_2$ elements and the pressure with $Q_1$ elements. Mathematically, the finite element space for the coupled problem is then often written as $V_h = Q_2^d \times Q_1$ where the exponentiation is understood to be the tensor product of spaces – i.e., in 2d, we have $V_h=Q_2\times Q_2\times Q_1$ – and tensor products lead to vectors where each component of the vector-valued function space corresponds to a scalar function in one of the $Q_2$ or $Q_1$ spaces. Using the FESystem class, this space is created using

    FESystem<dim> taylor_hood_fe (FE_Q<dim>(2)^dim, // velocity components
    FE_Q<dim>(1)); // pressure component
    Definition fe_q.h:551
    -

    The creation of this element here corresponds to taking tensor-product powers of the $Q_2$ element in the first line of the list of arguments to the FESystem constructor, and then concatenation via another tensor product with the element in the second line. This kind of construction is used, for example, in the step-22 tutorial program.

    +

    The creation of this element here corresponds to taking tensor-product powers of the $Q_2$ element in the first line of the list of arguments to the FESystem constructor, and then concatenation via another tensor product with the element in the second line. This kind of construction is used, for example, in the step-22 tutorial program.

    Similarly, step-8 solves an elasticity equation where we need to solve for the displacement of a solid object. The displacement again has $d$ components if the domain is $d$-dimensional, and so the combined finite element is created using

    FESystem<dim> displacement_fe (FE_Q<dim>(1)^dim);

    where now each (vector) component of the combined element corresponds to a $Q_1$ space.

    To the outside world, FESystem objects look just like a usual finite element object, they just happen to be composed of several other finite elements that are possibly of different type. These "base elements" can themselves have multiple components and, in particular, could also be vector-valued – for example, if one of the base elements is an FESystem itself (see also below). An example is given in the documentation of namespace FETools::Compositing, when using the "tensor product" strategy.

    @@ -3836,7 +3836,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3944,7 +3944,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4162,9 +4162,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 2024-03-17 21:57:21.799100609 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 2024-03-17 21:57:21.803100634 +0000 @@ -743,7 +743,7 @@

    If the shape function is vector-valued, then this returns the only non- zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_pointNumber of the quadrature point at which function is to be evaluated
    @@ -784,7 +784,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    componentvector component to be evaluated.
    @@ -823,7 +823,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    @@ -1025,17 +1025,17 @@
    -

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    +

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    -
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1071,7 +1071,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.

    @@ -1243,16 +1243,16 @@
    -

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    +

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    +
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1288,7 +1288,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1407,11 +1407,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    +
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1452,7 +1452,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -1571,11 +1571,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    -
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1613,7 +1613,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1776,11 +1776,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    +
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1821,7 +1821,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector- valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.

    @@ -2162,7 +2162,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    -

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    +

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2219,7 +2219,7 @@
    -

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    +

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2277,7 +2277,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2335,7 +2335,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2393,7 +2393,7 @@
    -

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    +

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 2024-03-17 21:57:21.903101252 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 2024-03-17 21:57:21.907101277 +0000 @@ -614,7 +614,7 @@

    If the shape function is vector-valued, then this returns the only non- zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_pointNumber of the quadrature point at which function is to be evaluated
    @@ -648,7 +648,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    componentvector component to be evaluated.
    @@ -680,7 +680,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    @@ -840,17 +840,17 @@ std::vector< typename InputVector::value_type > & values&#href_anchor"memdoc"> -

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    +

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    -
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -879,7 +879,7 @@ std::vector< Vector< typename InputVector::value_type > > & values&#href_anchor"memdoc">

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.

    @@ -1022,16 +1022,16 @@ std::vector< Tensor< 1, spacedim, typename InputVector::value_type > > & gradients&#href_anchor"memdoc"> -

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    +

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    +
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1060,7 +1060,7 @@ std::vector< std::vector< Tensor< 1, spacedim, typename InputVector::value_type > > > & gradients&#href_anchor"memdoc">

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1158,11 +1158,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    +
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1196,7 +1196,7 @@ const bool quadrature_points_fastest = false&#href_anchor"memdoc">

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -1294,11 +1294,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    -
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1329,7 +1329,7 @@ std::vector< Vector< typename InputVector::value_type > > & laplacians&#href_anchor"memdoc">

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1463,11 +1463,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    +
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1501,7 +1501,7 @@ const bool quadrature_points_fastest = false&#href_anchor"memdoc">

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector- valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.

    @@ -1770,7 +1770,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    -

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    +

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1811,7 +1811,7 @@
    -

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    +

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1853,7 +1853,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1895,7 +1895,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1937,7 +1937,7 @@
    -

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    +

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 2024-03-17 21:57:21.959101598 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 2024-03-17 21:57:21.959101598 +0000 @@ -701,7 +701,7 @@

    Return the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected scalar component.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1545 of file fe_values.cc.

    @@ -774,7 +774,7 @@

    Return the gradients of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected scalar component.

    -

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1600 of file fe_values.cc.

    @@ -833,7 +833,7 @@

    Return the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_hessians function but it only works on the selected scalar component.

    -

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1654 of file fe_values.cc.

    @@ -892,7 +892,7 @@

    Return the Laplacians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called. The Laplacians are the trace of the Hessians.

    This function is the equivalent of the FEValuesBase::get_function_laplacians function but it only works on the selected scalar component.

    -

    The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1708 of file fe_values.cc.

    @@ -951,7 +951,7 @@

    Return the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_third_derivatives function but it only works on the selected scalar component.

    -

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1762 of file fe_values.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-03-17 21:57:21.991101795 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-03-17 21:57:21.999101845 +0000 @@ -156,9 +156,9 @@

    Detailed Description

    template<int dim, int spacedim>
    class FEValuesViews::SymmetricTensor< 2, dim, spacedim >

    A class representing a view to a set of (dim*dim + dim)/2 components forming a symmetric second-order tensor from a vector-valued finite element. Views are discussed in the Handling vector valued problems module.

    -

    This class allows to query the value and divergence of (components of) shape functions and solutions representing symmetric tensors. The divergence of a symmetric tensor $S_{ij}, 0\le i,j<\text{dim}$ is defined as $d_i = \sum_j \frac{\partial S_{ij}}{\partial x_j}, 0\le
-i<\text{dim}$, which due to the symmetry of the tensor is also $d_i =
-\sum_j \frac{\partial S_{ji}}{\partial x_j}$. In other words, it due to the symmetry of $S$ it does not matter whether we apply the nabla operator by row or by column to get the divergence.

    +

    This class allows to query the value and divergence of (components of) shape functions and solutions representing symmetric tensors. The divergence of a symmetric tensor $S_{ij}, 0\le i,j<\text{dim}$ is defined as $d_i = \sum_j \frac{\partial S_{ij}}{\partial x_j}, 0\le
+i<\text{dim}$, which due to the symmetry of the tensor is also $d_i =
+\sum_j \frac{\partial S_{ji}}{\partial x_j}$. In other words, it due to the symmetry of $S$ it does not matter whether we apply the nabla operator by row or by column to get the divergence.

    You get an object of this type if you apply a FEValuesExtractors::SymmetricTensor to an FEValues, FEFaceValues or FESubfaceValues object.

    Definition at line 1477 of file fe_values.h.

    @@ -485,7 +485,7 @@

    Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2263 of file fe_values.cc.

    @@ -559,7 +559,7 @@

    Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

    See the general discussion of this class for a definition of the divergence.

    -

    The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2317 of file fe_values.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-03-17 21:57:22.035102067 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-03-17 21:57:22.043102116 +0000 @@ -169,8 +169,8 @@

    Detailed Description

    template<int dim, int spacedim>
    class FEValuesViews::Tensor< 2, dim, spacedim >

    A class representing a view to a set of dim*dim components forming a second-order tensor from a vector-valued finite element. Views are discussed in the Handling vector valued problems module.

    -

    This class allows to query the value, gradient and divergence of (components of) shape functions and solutions representing tensors. The divergence of a tensor $T_{ij},\, 0\le i,j<\text{dim}$ is defined as $d_i =
-\sum_j \frac{\partial T_{ij}}{\partial x_j}, \, 0\le i<\text{dim}$, whereas its gradient is $G_{ijk} = \frac{\partial T_{ij}}{\partial x_k}$.

    +

    This class allows to query the value, gradient and divergence of (components of) shape functions and solutions representing tensors. The divergence of a tensor $T_{ij},\, 0\le i,j<\text{dim}$ is defined as $d_i =
+\sum_j \frac{\partial T_{ij}}{\partial x_j}, \, 0\le i<\text{dim}$, whereas its gradient is $G_{ijk} = \frac{\partial T_{ij}}{\partial x_k}$.

    You get an object of this type if you apply a FEValuesExtractors::Tensor to an FEValues, FEFaceValues or FESubfaceValues object.

    Definition at line 1815 of file fe_values.h.

    @@ -591,7 +591,7 @@

    Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2373 of file fe_values.cc.

    @@ -665,7 +665,7 @@

    Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

    See the general discussion of this class for a definition of the divergence.

    -

    The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2427 of file fe_values.cc.

    @@ -724,7 +724,7 @@

    Return the gradient of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    See the general discussion of this class for a definition of the gradient.

    -

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2482 of file fe_values.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 2024-03-17 21:57:22.095102438 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 2024-03-17 21:57:22.103102487 +0000 @@ -229,8 +229,8 @@
    template<int dim, int spacedim = dim>
    class FEValuesViews::Vector< dim, spacedim >

    A class representing a view to a set of spacedim components forming a vector part of a vector-valued finite element. Views are discussed in the Handling vector valued problems module.

    Note that in the current context, a vector is meant in the sense physics uses it: it has spacedim components that behave in specific ways under coordinate system transformations. Examples include velocity or displacement fields. This is opposed to how mathematics uses the word "vector" (and how we use this word in other contexts in the library, for example in the Vector class), where it really stands for a collection of numbers. An example of this latter use of the word could be the set of concentrations of chemical species in a flame; however, these are really just a collection of scalar variables, since they do not change if the coordinate system is rotated, unlike the components of a velocity vector, and consequently, this class should not be used for this context.

    -

    This class allows to query the value, gradient and divergence of (components of) shape functions and solutions representing vectors. The gradient of a vector $d_{k}, 0\le k<\text{dim}$ is defined as $S_{ij} =
-\frac{\partial d_{i}}{\partial x_j}, 0\le i,j<\text{dim}$.

    +

    This class allows to query the value, gradient and divergence of (components of) shape functions and solutions representing vectors. The gradient of a vector $d_{k}, 0\le k<\text{dim}$ is defined as $S_{ij} =
+\frac{\partial d_{i}}{\partial x_j}, 0\le i,j<\text{dim}$.

    You get an object of this type if you apply a FEValuesExtractors::Vector to an FEValues, FEFaceValues or FESubfaceValues object.

    Definition at line 675 of file fe_values.h.

    @@ -288,8 +288,8 @@

    An alias for the type of symmetrized gradients of the view this class represents. Here, for a set of dim components of the finite element, the symmetrized gradient is a SymmetricTensor<2,spacedim>.

    -

    The symmetric gradient of a vector field $\mathbf v$ is defined as $\varepsilon(\mathbf v)=\frac 12 (\nabla \mathbf v + \nabla \mathbf
-v^T)$.

    +

    The symmetric gradient of a vector field $\mathbf v$ is defined as $\varepsilon(\mathbf v)=\frac 12 (\nabla \mathbf v + \nabla \mathbf
+v^T)$.

    Definition at line 705 of file fe_values.h.

    @@ -812,8 +812,8 @@ const unsigned int q_point&#href_anchor"memdoc">

    Return the symmetric gradient (a symmetric tensor of rank 2) of the vector component selected by this view, for the shape function and quadrature point selected by the arguments.

    -

    The symmetric gradient is defined as $\frac 12 [(\nabla \phi_i(x_q)) +
-(\nabla \phi_i(x_q))^T]$, where $\phi_i$ represents the dim components selected from the FEValuesBase object, and $x_q$ is the location of the $q$-th quadrature point.

    +

    The symmetric gradient is defined as $\frac 12 [(\nabla \phi_i(x_q)) +
+(\nabla \phi_i(x_q))^T]$, where $\phi_i$ represents the dim components selected from the FEValuesBase object, and $x_q$ is the location of the $q$-th quadrature point.

    Note
    The meaning of the arguments is as documented for the value() function.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -862,16 +862,16 @@ const unsigned int q_point&#href_anchor"memdoc">

    Return the vector curl of the vector components selected by this view, for the shape function and quadrature point selected by the arguments. For 1d this function does not make any sense. Thus it is not implemented for spacedim=1. In 2d the curl is defined as

    -\begin{equation*}
+<picture><source srcset=\begin{equation*}
 \operatorname{curl}(u) \dealcoloneq \frac{du_2}{dx} -\frac{du_1}{dy},
-\end{equation*} +\end{equation*}" src="form_1247.png"/>

    whereas in 3d it is given by

    -\begin{equation*}
+<picture><source srcset=\begin{equation*}
 \operatorname{curl}(u) \dealcoloneq \left( \begin{array}{c}
 \frac{du_3}{dy}-\frac{du_2}{dz}\\ \frac{du_1}{dz}-\frac{du_3}{dx}\\
 \frac{du_2}{dx}-\frac{du_1}{dy} \end{array} \right).
-\end{equation*} +\end{equation*}" src="form_1248.png"/>

    Note
    The meaning of the arguments is as documented for the value() function.
    @@ -951,7 +951,7 @@

    Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1818 of file fe_values.cc.

    @@ -1024,7 +1024,7 @@

    Return the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected vector components.

    -

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1872 of file fe_values.cc.

    @@ -1082,10 +1082,10 @@

    Return the symmetrized gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    -

    The symmetric gradient of a vector field $\mathbf v$ is defined as $\varepsilon(\mathbf v)=\frac 12 (\nabla \mathbf v + \nabla \mathbf
-v^T)$.

    +

    The symmetric gradient of a vector field $\mathbf v$ is defined as $\varepsilon(\mathbf v)=\frac 12 (\nabla \mathbf v + \nabla \mathbf
+v^T)$.

    Note
    There is no equivalent function such as FEValuesBase::get_function_symmetric_gradients in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.
    -

    The data type stored by the output vector must be what you get when you multiply the symmetric gradients of shape functions (i.e., symmetric_gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the symmetric gradients of shape functions (i.e., symmetric_gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1926 of file fe_values.cc.

    @@ -1144,7 +1144,7 @@

    Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

    -

    The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1982 of file fe_values.cc.

    @@ -1203,7 +1203,7 @@

    Return the curl of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_curls in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

    -

    The data type stored by the output vector must be what you get when you multiply the curls of shape functions (i.e., curl_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the curls of shape functions (i.e., curl_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2037 of file fe_values.cc.

    @@ -1262,7 +1262,7 @@

    Return the Hessians of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_hessians function but it only works on the selected vector components.

    -

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2091 of file fe_values.cc.

    @@ -1321,7 +1321,7 @@

    Return the Laplacians of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called. The Laplacians are the trace of the Hessians.

    This function is the equivalent of the FEValuesBase::get_function_laplacians function but it only works on the selected vector components.

    -

    The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., laplacian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., laplacian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2145 of file fe_values.cc.

    @@ -1380,7 +1380,7 @@

    Return the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_third_derivatives function but it only works on the selected scalar component.

    -

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2207 of file fe_values.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 2024-03-17 21:57:22.251103401 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 2024-03-17 21:57:22.255103426 +0000 @@ -748,11 +748,11 @@
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2772,7 +2772,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3466,7 +3466,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3574,7 +3574,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3884,9 +3884,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 2024-03-17 21:57:22.407104365 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 2024-03-17 21:57:22.411104389 +0000 @@ -724,11 +724,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2712,7 +2712,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3406,7 +3406,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3514,7 +3514,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3824,9 +3824,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 2024-03-17 21:57:22.563105328 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 2024-03-17 21:57:22.567105353 +0000 @@ -735,11 +735,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2693,7 +2693,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3387,7 +3387,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3495,7 +3495,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3805,9 +3805,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html 2024-03-17 21:57:22.719106292 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html 2024-03-17 21:57:22.723106317 +0000 @@ -2402,17 +2402,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2447,21 +2447,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3609,7 +3609,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3717,7 +3717,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3996,9 +3996,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4033,11 +4033,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 2024-03-17 21:57:22.871107231 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 2024-03-17 21:57:22.879107281 +0000 @@ -2590,7 +2590,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3284,7 +3284,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3392,7 +3392,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3702,9 +3702,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3739,11 +3739,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 2024-03-17 21:57:23.023108170 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 2024-03-17 21:57:23.031108219 +0000 @@ -2590,7 +2590,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3284,7 +3284,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3392,7 +3392,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3702,9 +3702,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3739,11 +3739,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 2024-03-17 21:57:23.183109158 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 2024-03-17 21:57:23.187109183 +0000 @@ -486,10 +486,10 @@

    This can be understood by the following 2-d example: consider the cell with vertices at $(0,0),(1,0),(0,1),(s,s)$:

    -

    For this cell, a bilinear transformation $F$ produces the relations $x=\hat
-x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat
+<p>For this cell, a bilinear transformation <picture><source srcset=$F$ produces the relations $x=\hat
+x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat
 y)=\hat x$ and $\phi_2(\hat x,\hat y)=\hat y$ are mapped onto $\phi_1(x,y)=\frac{x-t}{t(s-1)},\phi_2(x,y)=t$ where $t=\frac{y}{s-x+sx+y-sy}$.

    -

    For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

    +

    For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

    Visualization of shape functions

    In 2d, the shape functions of this element look as follows.

    $P_0$ element

    @@ -505,7 +505,7 @@

    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    -

    $P_1$ element

    +

    $P_1$ element

    - @@ -529,7 +529,7 @@

    -
    @@ -517,9 +517,9 @@

    $P_1$ element, shape function 0

    +

    $P_1$ element, shape function 0

    -

    $P_1$ element, shape function 1

    +

    $P_1$ element, shape function 1

    $P_1$ element, shape function 2

    +

    $P_1$ element, shape function 2

    @@ -2366,17 +2366,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2411,21 +2411,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3040,7 +3040,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3734,7 +3734,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3842,7 +3842,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4121,9 +4121,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4158,11 +4158,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 2024-03-17 21:57:23.355110222 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 2024-03-17 21:57:23.355110222 +0000 @@ -496,10 +496,10 @@

    This can be understood by the following 2-d example: consider the cell with vertices at $(0,0),(1,0),(0,1),(s,s)$:

    -

    For this cell, a bilinear transformation $F$ produces the relations $x=\hat
-x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat
+<p>For this cell, a bilinear transformation <picture><source srcset=$F$ produces the relations $x=\hat
+x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat
 y)=\hat x$ and $\phi_2(\hat x,\hat y)=\hat y$ are mapped onto $\phi_1(x,y)=\frac{x-t}{t(s-1)},\phi_2(x,y)=t$ where $t=\frac{y}{s-x+sx+y-sy}$.

    -

    For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

    +

    For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

    Visualization of shape functions

    In 2d, the shape functions of this element look as follows.

    $P_0$ element

    @@ -515,7 +515,7 @@

    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    -

    $P_1$ element

    +

    $P_1$ element

    - @@ -539,7 +539,7 @@

    -
    @@ -527,9 +527,9 @@

    $P_1$ element, shape function 0

    +

    $P_1$ element, shape function 0

    -

    $P_1$ element, shape function 1

    +

    $P_1$ element, shape function 1

    $P_1$ element, shape function 2

    +

    $P_1$ element, shape function 2

    @@ -2484,17 +2484,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2529,21 +2529,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3356,7 +3356,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -4050,7 +4050,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4158,7 +4158,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4468,9 +4468,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4505,11 +4505,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 2024-03-17 21:57:23.515111209 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 2024-03-17 21:57:23.515111209 +0000 @@ -496,7 +496,7 @@

    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    -

    $P_1$ element

    +

    $P_1$ element

    - @@ -520,7 +520,7 @@

    -
    @@ -508,9 +508,9 @@

    $P_1$ element, shape function 0

    +

    $P_1$ element, shape function 0

    -

    $P_1$ element, shape function 1

    +

    $P_1$ element, shape function 1

    $P_1$ element, shape function 2

    +

    $P_1$ element, shape function 2

    @@ -2547,7 +2547,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3241,7 +3241,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3349,7 +3349,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3659,9 +3659,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3696,11 +3696,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 2024-03-17 21:57:23.663112124 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 2024-03-17 21:57:23.671112173 +0000 @@ -506,7 +506,7 @@ *

    with node 13 being placed in the interior of the hex.

    Note, however, that these are just the Lagrange interpolation points of the shape functions. Even though they may physically be on the boundary of the cell, they are logically in the interior since there are no continuity requirements for these shape functions across cell boundaries. While discontinuous, when restricted to a single cell the shape functions of this element are exactly the same as those of the FE_Q element where they are shown visually.

    Unit support point distribution and conditioning of interpolation

    -

    When constructing an FE_DGQ element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points xi are those points where the jth Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points get increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

    +

    When constructing an FE_DGQ element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points xi are those points where the jth Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points get increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

    The Gauss-Lobatto points in 1d include the end points 0 and +1 of the unit interval. The interior points are shifted towards the end points, which gives a denser point distribution close to the element boundary.

    Definition at line 112 of file fe_dgq.h.

    @@ -2308,17 +2308,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2353,21 +2353,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -2884,7 +2884,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3578,7 +3578,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3686,7 +3686,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3965,9 +3965,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html 2024-03-17 21:57:23.815113063 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html 2024-03-17 21:57:23.823113112 +0000 @@ -2236,17 +2236,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2281,21 +2281,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -2812,7 +2812,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3506,7 +3506,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3614,7 +3614,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3893,9 +3893,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 2024-03-17 21:57:23.967114002 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 2024-03-17 21:57:23.975114050 +0000 @@ -2240,17 +2240,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2285,21 +2285,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -2816,7 +2816,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3510,7 +3510,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3618,7 +3618,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3897,9 +3897,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html 2024-03-17 21:57:24.115114916 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html 2024-03-17 21:57:24.119114940 +0000 @@ -2238,17 +2238,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2283,21 +2283,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -2814,7 +2814,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3508,7 +3508,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3616,7 +3616,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3895,9 +3895,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 2024-03-17 21:57:24.263115830 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 2024-03-17 21:57:24.267115855 +0000 @@ -2590,7 +2590,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3284,7 +3284,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3392,7 +3392,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3702,9 +3702,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3739,11 +3739,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 2024-03-17 21:57:24.411116744 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 2024-03-17 21:57:24.415116769 +0000 @@ -2599,7 +2599,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3293,7 +3293,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3401,7 +3401,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3711,9 +3711,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3748,11 +3748,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 2024-03-17 21:57:24.563117683 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 2024-03-17 21:57:24.567117708 +0000 @@ -484,12 +484,12 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    class FE_Enriched< dim, spacedim >

    Implementation of a partition of unity finite element method (PUM) by Babuska and Melenk which enriches a standard finite element with an enrichment function multiplied with another (usually linear) finite element:

    -\[
+<picture><source srcset=\[
 U(\mathbf x) = \sum_i N_i(\mathbf x) U_i + \sum_j N_j(\mathbf x) \sum_k
-F_k(\mathbf x) U_{jk} \] +F_k(\mathbf x) U_{jk} \]" src="form_1077.png"/>

    -

    where $ N_i(\mathbf x) $ and $ N_j(\mathbf x) $ are the underlying finite elements (including the mapping from the isoparametric element to the real element); $ F_k(\mathbf x) $ are the scalar enrichment functions in real space (e.g. $ 1/r $, $ \exp(-r) $, etc); $ U_i $ and $
-U_{jk} $ are the standard and enriched DoFs. This allows to include in the finite element space a priori knowledge about the partial differential equation being solved which in turn improves the local approximation properties of the spaces. This can be useful for highly oscillatory solutions, problems with domain corners or on unbounded domains or sudden changes of boundary conditions. PUM method uses finite element spaces which satisfy the partition of unity property (e.g. FE_Q). Among other properties this makes the resulting space to reproduce enrichment functions exactly.

    +

    where $ N_i(\mathbf x) $ and $ N_j(\mathbf x) $ are the underlying finite elements (including the mapping from the isoparametric element to the real element); $ F_k(\mathbf x) $ are the scalar enrichment functions in real space (e.g. $ 1/r $, $ \exp(-r) $, etc); $ U_i $ and $
+U_{jk} $ are the standard and enriched DoFs. This allows to include in the finite element space a priori knowledge about the partial differential equation being solved which in turn improves the local approximation properties of the spaces. This can be useful for highly oscillatory solutions, problems with domain corners or on unbounded domains or sudden changes of boundary conditions. PUM method uses finite element spaces which satisfy the partition of unity property (e.g. FE_Q). Among other properties this makes the resulting space to reproduce enrichment functions exactly.

    The simplest constructor of this class takes two finite element objects and an enrichment function to be used. For example

    @@ -497,7 +497,7 @@
    Definition fe_q.h:551

    In this case, standard DoFs are distributed by FE_Q<dim>(2), whereas enriched DoFs are coming from a single finite element FE_Q<dim>(1) used with a single enrichment function function. In this case, the total number of DoFs on the enriched element is the sum of DoFs from FE_Q<dim>(2) and FE_Q<dim>(1).

    -

    As an example of an enrichment function, consider $ \exp(-x) $, which leads to the following shape functions on the unit element:

    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    +

    As an example of an enrichment function, consider $ \exp(-x) $, which leads to the following shape functions on the unit element:

    @@ -510,7 +510,7 @@
    1d element, base and enriched shape functions. enriched shape function corresponding to the central vertex.

    Note that evaluation of gradients (hessians) of the enriched shape functions or the finite element field requires evaluation of gradients (gradients and hessians) of the enrichment functions:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   U(\mathbf x)
     &= \sum_i N_i(\mathbf x) U_i
     + \sum_{j,k} N_j(\mathbf x) F_k(\mathbf x) U_{jk} \\
@@ -524,10 +524,10 @@
 F_k(\mathbf x) + \mathbf \nabla F_k(\mathbf x) \mathbf \nabla N_j(\mathbf x)
 + \mathbf \nabla N_j(\mathbf x) \mathbf \nabla F_k(\mathbf x) + N_j(\mathbf
 x) \mathbf \nabla \mathbf \nabla F_k(\mathbf x) \right] U_{jk}
-\end{align*} +\end{align*}" src="form_1086.png"/>

    Using enriched and non-enriched FEs together

    -

    In most applications it is beneficial to introduce enrichments only in some part of the domain (e.g. around a crack tip) and use standard FE (e.g. FE_Q) elsewhere. This can be achieved by using the hp-finite element framework in deal.II that allows for the use of different elements on different cells. To make the resulting space $C^0$ continuous, it is then necessary for the DoFHandler class and DoFTools::make_hanging_node_constraints() function to be able to figure out what to do at the interface between enriched and non-enriched cells. Specifically, we want the degrees of freedom corresponding to enriched shape functions to be zero at these interfaces. These classes and functions can not to do this automatically, but the effect can be achieved by using not just a regular FE_Q on cells without enrichment, but to wrap the FE_Q into an FE_Enriched object without actually enriching it. This can be done as follows:

    FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1));
    +

    In most applications it is beneficial to introduce enrichments only in some part of the domain (e.g. around a crack tip) and use standard FE (e.g. FE_Q) elsewhere. This can be achieved by using the hp-finite element framework in deal.II that allows for the use of different elements on different cells. To make the resulting space $C^0$ continuous, it is then necessary for the DoFHandler class and DoFTools::make_hanging_node_constraints() function to be able to figure out what to do at the interface between enriched and non-enriched cells. Specifically, we want the degrees of freedom corresponding to enriched shape functions to be zero at these interfaces. These classes and functions can not to do this automatically, but the effect can be achieved by using not just a regular FE_Q on cells without enrichment, but to wrap the FE_Q into an FE_Enriched object without actually enriching it. This can be done as follows:

    FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1));

    This constructor is equivalent to calling

    FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1),
    FE_Nothing<dim>(1,true),
    nullptr);
    @@ -2626,7 +2626,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3288,7 +3288,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3396,7 +3396,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3706,9 +3706,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3743,11 +3743,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 2024-03-17 21:57:24.715118622 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 2024-03-17 21:57:24.719118646 +0000 @@ -2660,7 +2660,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3354,7 +3354,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3462,7 +3462,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3741,9 +3741,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3778,11 +3778,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 2024-03-17 21:57:24.855119487 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 2024-03-17 21:57:24.859119512 +0000 @@ -2797,7 +2797,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    @@ -3441,7 +3441,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3543,7 +3543,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3800,9 +3800,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3835,11 +3835,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 2024-03-17 21:57:24.995120351 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 2024-03-17 21:57:25.003120401 +0000 @@ -2701,7 +2701,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3395,7 +3395,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3503,7 +3503,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3782,9 +3782,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 2024-03-17 21:57:25.143121265 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 2024-03-17 21:57:25.147121290 +0000 @@ -2348,7 +2348,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    @@ -2992,7 +2992,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3094,7 +3094,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3351,9 +3351,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3386,11 +3386,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 2024-03-17 21:57:25.323122377 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 2024-03-17 21:57:25.323122377 +0000 @@ -508,12 +508,12 @@

    Detailed Description

    template<int dim>
    class FE_Nedelec< dim >
    Warning
    Several aspects of the implementation are experimental. For the moment, it is safe to use the element on globally refined meshes with consistent orientation of faces. See the todo entries below for more detailed caveats.
    -

    Implementation of Nédélec elements. The Nédélec space is designed to solve problems in which the solution only lives in the space $H^\text{curl}=\{ {\mathbf u} \in L_2: \text{curl}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose curl is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the Maxwell equations and corresponding simplifications, such as the reduced version of the Maxwell equation that only involves the electric field $\mathbf E$ which has to satisfy the equation $\text{curl}\, \text{curl}\, {\mathbf E} = 0$ in the time independent case when no currents are present, or the equation $\text{curl}\,\text{curl}\,{\mathbf A} = 4\pi{\mathbf j}$ that the magnetic vector potential $\mathbf A$ has to satisfy in the time independent case.

    -

    The defining characteristic of functions in $H^\text{curl}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the tangential component(s) of the vector field must be continuous across the line (or surface) even though the normal component may not be. As a consequence, the Nédélec element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the tangential component(s) of the vector field represented by each shape function are continuous across the faces of cells.

    +

    Implementation of Nédélec elements. The Nédélec space is designed to solve problems in which the solution only lives in the space $H^\text{curl}=\{ {\mathbf u} \in L_2: \text{curl}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose curl is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the Maxwell equations and corresponding simplifications, such as the reduced version of the Maxwell equation that only involves the electric field $\mathbf E$ which has to satisfy the equation $\text{curl}\, \text{curl}\, {\mathbf E} = 0$ in the time independent case when no currents are present, or the equation $\text{curl}\,\text{curl}\,{\mathbf A} = 4\pi{\mathbf j}$ that the magnetic vector potential $\mathbf A$ has to satisfy in the time independent case.

    +

    The defining characteristic of functions in $H^\text{curl}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the tangential component(s) of the vector field must be continuous across the line (or surface) even though the normal component may not be. As a consequence, the Nédélec element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the tangential component(s) of the vector field represented by each shape function are continuous across the faces of cells.

    Other properties of the Nédélec element are that (i) it is not a primitive element ; (ii) the shape functions are defined so that certain integrals over the faces are either zero or one, rather than the common case of certain point values being either zero or one.

    We follow the commonly used – though confusing – definition of the "degree" of Nédélec elements. Specifically, the "degree" of the element denotes the polynomial degree of the largest complete polynomial subspace contained in the finite element space, even if the space may contain shape functions of higher polynomial degree. The lowest order element is consequently FE_Nedelec(0), i.e., the Raviart-Thomas element "of degree zero", even though the functions of this space are in general polynomials of degree one in each variable. This choice of "degree" implies that the approximation order of the function itself is degree+1, as with usual polynomial spaces. The numbering so chosen implies the sequence

    -\[
+<picture><source srcset=\[
   Q_{k+1}
   \stackrel{\text{grad}}{\rightarrow}
   \text{Nedelec}_k
@@ -521,7 +521,7 @@
   \text{RaviartThomas}_k
   \stackrel{\text{div}}{\rightarrow}
   DGQ_{k}
-\] +\]" src="form_1119.png"/>

    Note that this follows the convention of Brezzi and Raviart, though not the one used in the original paper by Nédélec.

    This class is not implemented for the codimension one case (spacedim != dim).

    @@ -1373,11 +1373,11 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -3489,7 +3489,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -4183,7 +4183,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4291,7 +4291,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4570,9 +4570,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 2024-03-17 21:57:25.471123292 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 2024-03-17 21:57:25.467123267 +0000 @@ -2253,7 +2253,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    @@ -2897,7 +2897,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -2999,7 +2999,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3283,9 +3283,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3318,11 +3318,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html 2024-03-17 21:57:25.507123514 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html 2024-03-17 21:57:25.515123563 +0000 @@ -147,9 +147,9 @@ class FE_NedelecSZ< dim, spacedim >::InternalData

    Derived Internal data which is used to store cell-independent data. Note that due to the nature of this element, a number of useful pre-computed quantities are stored for the computation of cell-dependent shape functions.

    The main quantities which are stored are associated with edge and face parameterizations. These are:

    • -$\lambda_{i}$ - trilinear function, equal to one at the $i$-th vertex and zero at all other vertices.
    • +$\lambda_{i}$ - trilinear function, equal to one at the $i$-th vertex and zero at all other vertices.
    • -$\sigma_{i}$ - linear functional associated with the $i$-th vertex.
    • +$\sigma_{i}$ - linear functional associated with the $i$-th vertex.

    The definitions of these functionals, as well as the edge and face parameterizations and edge and face extension parameters, can be found on page 82 of Zaglmayr's thesis. The details of the definition of the globally-defined edge and face orientations can be found on page 67.

    @@ -280,9 +280,9 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Storage for all possible edge parameterization between vertices. These are required in the computation of edge- and face-based DoFs, which are cell-dependent.

    -

    The edge parameterization of an edge, E, starting at vertex i and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma{j}$.

    -

    sigma_imj_values[q][i][j] stores the value of the edge parameterization connected by vertices $i$ and $j$ at the q-th quadrature point.

    -

    Note that not all of the $i$ and $j$ combinations result in valid edges on the hexahedral cell, but they are computed in this fashion for use with non-standard edge and face orientations.

    +

    The edge parameterization of an edge, E, starting at vertex i and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma{j}$.

    +

    sigma_imj_values[q][i][j] stores the value of the edge parameterization connected by vertices $i$ and $j$ at the q-th quadrature point.

    +

    Note that not all of the $i$ and $j$ combinations result in valid edges on the hexahedral cell, but they are computed in this fashion for use with non-standard edge and face orientations.

    Definition at line 287 of file fe_nedelec_sz.h.

    @@ -302,8 +302,8 @@

    Storage for gradients of all possible edge parameterizations between vertices. These are required in the computation of edge- and face-based DoFs, which are cell-dependent. Note that the components of the gradient are constant.

    -

    The edge parameterization of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma{j}$.

    -

    sigma_imj_grads[i][j][d] stores the gradient of the edge parameterization connected by vertices $i$ and $j$ in component $d$.

    +

    The edge parameterization of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma{j}$.

    +

    sigma_imj_grads[i][j][d] stores the gradient of the edge parameterization connected by vertices $i$ and $j$ in component $d$.

    Note that the gradient of the edge parameterization is constant on an edge, so we do not need to store it at every quadrature point.

    Definition at line 304 of file fe_nedelec_sz.h.

    @@ -366,10 +366,10 @@

    Storage for edge extension parameters at quadrature points. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    -

    The edge extension parameter of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\lambda_{E} = \lambda_{i} +
-\lambda_{j}$.

    -

    Note that under this definition, the values of $\lambda_{E}$ do not change with the orientation of the edge.

    -

    edge_lambda_values[m][q] stores the edge extension parameter value at the $q$-th quadrature point on edge $m$.

    +

    The edge extension parameter of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\lambda_{E} = \lambda_{i} +
+\lambda_{j}$.

    +

    Note that under this definition, the values of $\lambda_{E}$ do not change with the orientation of the edge.

    +

    edge_lambda_values[m][q] stores the edge extension parameter value at the $q$-th quadrature point on edge $m$.

    Definition at line 347 of file fe_nedelec_sz.h.

    @@ -389,7 +389,7 @@

    Storage for gradients of edge extension parameters in 2d. In this case they are constant. These are stored for the 12 edges such that the global vertex numbering* would follow the order defined by the "standard" deal.II cell.

    -

    edge_lambda_grads_2d[m][d] stores the gradient of the edge extension parameter for component $d$ on edge $m$.

    +

    edge_lambda_grads_2d[m][d] stores the gradient of the edge extension parameter for component $d$ on edge $m$.

    Definition at line 358 of file fe_nedelec_sz.h.

    @@ -409,7 +409,7 @@

    Storage for gradients of edge extension parameters in 3d. In this case they are non-constant. These are stored for the 12 edges such that the global vertex numbering* would follow the order defined by the "standard" deal.II cell.

    -

    edge_lambda_grads_3d[m][q][d] stores the gradient of the edge extension parameter for component $d$ at the $q$-th quadrature point on edge m.

    +

    edge_lambda_grads_3d[m][q][d] stores the gradient of the edge extension parameter for component $d$ at the $q$-th quadrature point on edge m.

    Definition at line 369 of file fe_nedelec_sz.h.

    @@ -429,7 +429,7 @@

    Storage for 2nd derivatives of edge extension parameters in 3d, which are constant across the cell. These are stored for the 12 edges such that the global vertex numbering* would follow the order defined by the "standard" deal.II cell.

    -

    edge_lambda_gradgrads_3d[m][d1][d2] stores the 2nd derivatives of the edge extension parameters with respect to components d1 and d2 on edge $m$.

    +

    edge_lambda_gradgrads_3d[m][d1][d2] stores the 2nd derivatives of the edge extension parameters with respect to components d1 and d2 on edge $m$.

    Definition at line 381 of file fe_nedelec_sz.h.

    @@ -449,10 +449,10 @@

    Storage for the face extension parameters. These are stored for the 6 faces such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    -

    The face extension parameter of a face, F, defined by the vertices v1, v2, v3, v4 is given by $\lambda_{F} = \lambda_{v1} + \lambda_{v2} + \lambda_{v3} +
-\lambda_{v4}$.

    -

    Note that under this definition, the values of $\lambda_{F}$ do not change with the orientation of the face.

    -

    face_lambda_values[m][q] stores the face extension parameter value at the $q$-th quadrature point on face $m$.

    +

    The face extension parameter of a face, F, defined by the vertices v1, v2, v3, v4 is given by $\lambda_{F} = \lambda_{v1} + \lambda_{v2} + \lambda_{v3} +
+\lambda_{v4}$.

    +

    Note that under this definition, the values of $\lambda_{F}$ do not change with the orientation of the face.

    +

    face_lambda_values[m][q] stores the face extension parameter value at the $q$-th quadrature point on face $m$.

    Definition at line 399 of file fe_nedelec_sz.h.

    @@ -472,7 +472,7 @@

    Storage for gradients of face extension parameters. These are stored for the 6 faces such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    -

    face_lambda_grads[m][d] stores the gradient of the face extension parameters for component $d$ on face $m$.

    +

    face_lambda_grads[m][d] stores the gradient of the face extension parameters for component $d$ on face $m$.

    Definition at line 409 of file fe_nedelec_sz.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 2024-03-17 21:57:25.667124502 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 2024-03-17 21:57:25.667124502 +0000 @@ -466,7 +466,7 @@ class FE_Nothing< dim, spacedim >

    Definition of a finite element space with zero degrees of freedom and that, consequently, can only represent a single function: the zero function.

    This class is useful (in the context of an hp-method) to represent empty cells in the triangulation on which no degrees of freedom should be allocated, or to describe a field that is extended by zero to a part of the domain where we don't need it. Thus a triangulation may be divided into two regions: an active region where normal elements are used, and an inactive region where FE_Nothing elements are used. The DoFHandler will therefore assign no degrees of freedom to the FE_Nothing cells, and this subregion is therefore implicitly deleted from the computation. step-10 and step-46 show use cases for this element. An interesting application for this element is also presented in the paper [Cangiani2012].

    FE_Nothing as seen as a function space

    -

    Finite elements are often best interpreted as forming a function space, i.e., a set of functions that form a vector space. One can indeed interpret FE_Nothing in this light: It corresponds to the function space $V_h=\{0\}$, i.e., the set of functions that are zero everywhere. (The constructor can take an argument that, if greater than one, extends the space to one of vector-valued functions with more than one component, with all components equal to zero everywhere.) Indeed, this is a vector space since every linear combination of elements in the vector space is also an element in the vector space, as is every multiple of the single element zero. It is obvious that the function space has no degrees of freedom, thus the name of the class.

    +

    Finite elements are often best interpreted as forming a function space, i.e., a set of functions that form a vector space. One can indeed interpret FE_Nothing in this light: It corresponds to the function space $V_h=\{0\}$, i.e., the set of functions that are zero everywhere. (The constructor can take an argument that, if greater than one, extends the space to one of vector-valued functions with more than one component, with all components equal to zero everywhere.) Indeed, this is a vector space since every linear combination of elements in the vector space is also an element in the vector space, as is every multiple of the single element zero. It is obvious that the function space has no degrees of freedom, thus the name of the class.

    FE_Nothing in combination with other elements

    In situations such as those of step-46, one uses FE_Nothing on cells where one is not interested in a solution variable. For example, in fluid structure interaction problems, the fluid velocity is only defined on cells inside the fluid part of the domain. One then uses FE_Nothing on cells in the solid part of the domain to describe the finite element space for the velocity. In other words, the velocity lives everywhere conceptually, but it is identically zero in those parts of the domain where it is not of interest and doesn't use up any degrees of freedom there.

    The question is what happens at the interface between areas where one is interested in the solution (and uses a "normal" finite element) and where one is not interested (and uses FE_Nothing): Should the solution at that interface be zero – i.e., we consider a "continuous" finite element field that happens to be zero in that area where FE_Nothing is used – or is there no requirement for continuity at the interface. In the deal.II language, this is encoded by what the function FiniteElement::compare_for_domination() returns: If the FE_Nothing "dominates", then the solution must be zero at the interface; if it does not, then there is no requirement and one can think of FE_Nothing as a function space that is in general discontinuous (i.e., there is no requirement for any kind of continuity at cell interfaces) but on every cell equal to zero.

    @@ -2274,7 +2274,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -2968,7 +2968,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3076,7 +3076,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3355,9 +3355,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3392,11 +3392,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 2024-03-17 21:57:25.811125392 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 2024-03-17 21:57:25.819125441 +0000 @@ -473,13 +473,13 @@

    Detailed Description

    Implementation of the scalar version of the P1 nonconforming finite element, a piecewise linear element on quadrilaterals in 2d. This implementation is only for 2d cells in a 2d space (i.e., codimension 0).

    Unlike the usual continuous, $H^1$ conforming finite elements, the P1 nonconforming element does not enforce continuity across edges. However, it requires the continuity in an integral sense: any function in the space should have the same integral values on two sides of the common edge shared by two adjacent elements.

    -

    Thus, each function in the nonconforming element space can be discontinuous, and consequently not included in $H^1_0$, just like the basis functions in Discontinuous Galerkin (DG) finite element spaces. On the other hand, basis functions in DG spaces are completely discontinuous across edges without any relation between the values from both sides. This is a reason why usual weak formulations for DG schemes contain additional penalty terms for jump across edges to control discontinuity. However, nonconforming elements usually do not need additional terms in their weak formulations because their integrals along edges are the same from both sides, i.e., there is some level of continuity.

    +

    Thus, each function in the nonconforming element space can be discontinuous, and consequently not included in $H^1_0$, just like the basis functions in Discontinuous Galerkin (DG) finite element spaces. On the other hand, basis functions in DG spaces are completely discontinuous across edges without any relation between the values from both sides. This is a reason why usual weak formulations for DG schemes contain additional penalty terms for jump across edges to control discontinuity. However, nonconforming elements usually do not need additional terms in their weak formulations because their integrals along edges are the same from both sides, i.e., there is some level of continuity.

    Dice Rule

    Since any function in the P1 nonconforming space is piecewise linear on each element, the function value at the midpoint of each edge is same as the mean value on the edge. Thus the continuity of the integral value across each edge is equivalent to the continuity of the midpoint value of each edge in this case.

    Thus for the P1 nonconforming element, the function values at midpoints on edges of a cell are important. The first attempt to define (local) degrees of freedom (DoFs) on a quadrilateral is by using midpoint values of a function.

    However, these 4 functionals are not linearly independent because a linear function on 2d is uniquely determined by only 3 independent values. A simple observation reads that any linear function on a quadrilateral should satisfy the 'dice rule': the sum of two function values at the midpoints of the edge pair on opposite sides of a cell is equal to the sum of those at the midpoints of the other edge pair. This is called the 'dice rule' because the number of points on opposite sides of a dice always adds up to the same number as well (in the case of dice, to seven).

    -

    In formulas, the dice rule is written as $\phi(m_0) + \phi(m_1) = \phi(m_2) +
-  \phi(m_3)$ for all $\phi$ in the function space where $m_j$ is the midpoint of the edge $e_j$. Here, we assume the standard numbering convention for edges used in deal.II and described in class GeometryInfo.

    +

    In formulas, the dice rule is written as $\phi(m_0) + \phi(m_1) = \phi(m_2) +
+  \phi(m_3)$ for all $\phi$ in the function space where $m_j$ is the midpoint of the edge $e_j$. Here, we assume the standard numbering convention for edges used in deal.II and described in class GeometryInfo.

    Conversely if 4 values at midpoints satisfying the dice rule are given, then there always exists the unique linear function which coincides with 4 midpoints values.

    Due to the dice rule, three values at any three midpoints can determine the last value at the last midpoint. It means that the number of independent local functionals on a cell is 3, and this is also the dimension of the linear polynomial space on a cell in 2d.

    Shape functions

    @@ -495,11 +495,11 @@ * | | * | | * 0---------|---------1 -*

    For each vertex $v_j$ of given cell, there are two edges of which $v_j$ is one of end points. Consider a linear function such that it has value 0.5 at the midpoints of two adjacent edges, and 0.0 at the two midpoints of the other edges. Note that the set of these values satisfies the dice rule which is described above. We denote such a function associated with vertex $v_j$ by $\phi_j$. Then the set of 4 shape functions is a partition of unity on a cell: $\sum_{j=0}^{3} \phi_j = 1$. (This is easy to see: at each edge midpoint, the sum of the four function adds up to one because two functions have value 0.5 and the other value 0.0. Because the function is globally linear, the only function that can have value 1 at four points must also be globally equal to one.)

    -

    The following figures represent $\phi_j$ for $j=0,\cdots,3$ with their midpoint values:

    +*

    For each vertex $v_j$ of given cell, there are two edges of which $v_j$ is one of end points. Consider a linear function such that it has value 0.5 at the midpoints of two adjacent edges, and 0.0 at the two midpoints of the other edges. Note that the set of these values satisfies the dice rule which is described above. We denote such a function associated with vertex $v_j$ by $\phi_j$. Then the set of 4 shape functions is a partition of unity on a cell: $\sum_{j=0}^{3} \phi_j = 1$. (This is easy to see: at each edge midpoint, the sum of the four function adds up to one because two functions have value 0.5 and the other value 0.0. Because the function is globally linear, the only function that can have value 1 at four points must also be globally equal to one.)

    +

    The following figures represent $\phi_j$ for $j=0,\cdots,3$ with their midpoint values:

    • -

      shape function $\phi_0$:

      *  +--------0.0--------+
      +

      shape function $\phi_0$:

      *  +--------0.0--------+
       *  |                   |
       *  |                   |
       *  |                   |
      @@ -513,7 +513,7 @@
       *  

    • -

      shape function $\phi_1$:

      *  +--------0.0--------+
      +

      shape function $\phi_1$:

      *  +--------0.0--------+
       *  |                   |
       *  |                   |
       *  |                   |
      @@ -527,7 +527,7 @@
       *  

    • -

      shape function $\phi_2$:

      *  +--------0.5--------+
      +

      shape function $\phi_2$:

      *  +--------0.5--------+
       *  |                   |
       *  |                   |
       *  |                   |
      @@ -541,7 +541,7 @@
       *  

    • -

      shape function $\phi_3$:

      *  +--------0.5--------+
      +

      shape function $\phi_3$:

      *  +--------0.5--------+
       *  |                   |
       *  |                   |
       *  |                   |
      @@ -871,8 +871,8 @@
         
       
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    -

    Return the coefficients of 4 local linear shape functions $\phi_j(x,y) = a
-x + b y + c$ on given cell. For each local shape function, the array consists of three coefficients is in order of a,b and c.

    +

    Return the coefficients of 4 local linear shape functions $\phi_j(x,y) = a
+x + b y + c$ on given cell. For each local shape function, the array consists of three coefficients is in order of a,b and c.

    Definition at line 89 of file fe_p1nc.cc.

    @@ -2300,7 +2300,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    @@ -2944,7 +2944,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3046,7 +3046,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3330,9 +3330,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3365,11 +3365,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 2024-03-17 21:57:25.979126430 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 2024-03-17 21:57:25.987126479 +0000 @@ -1400,17 +1400,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -1445,21 +1445,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -2397,7 +2397,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3091,7 +3091,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3199,7 +3199,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3509,9 +3509,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3546,11 +3546,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html 2024-03-17 21:57:26.143127443 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html 2024-03-17 21:57:26.143127443 +0000 @@ -2271,7 +2271,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -2965,7 +2965,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3073,7 +3073,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3383,9 +3383,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3420,11 +3420,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 2024-03-17 21:57:26.299128406 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 2024-03-17 21:57:26.307128456 +0000 @@ -493,12 +493,12 @@

    Similarly, in many cases, node functionals depend on the shape of the mesh cell, since they evaluate normal or tangential components on the faces. In order to allow for a set of transformations, the variable mapping_kind has been introduced. It needs be set in the constructor of a derived class.

    Any derived class must decide on the polynomial space to use. This polynomial space should be implemented simply as a set of vector valued polynomials like PolynomialsBDM and PolynomialsRaviartThomas. In order to facilitate this implementation, which basis the polynomial space chooses is not of importance to the current class – as described next, this class handles the transformation from the basis chosen by the polynomial space template argument to the basis we want to use for finite element computations internally.

    Determining the correct basis

    -

    In most cases, the basis used by the class that describes the polynomial space, $\{\tilde\varphi_j(\hat{\mathbf x})\}$, does not match the one we want to use for the finite element description, $\{\varphi_j(\hat{\mathbf x})\}$. Rather, we need to express the finite element shape functions as a linear combination of the basis provided by the polynomial space:

    -\begin{align*}
+<p>In most cases, the basis used by the class that describes the polynomial space, <picture><source srcset=$\{\tilde\varphi_j(\hat{\mathbf x})\}$, does not match the one we want to use for the finite element description, $\{\varphi_j(\hat{\mathbf x})\}$. Rather, we need to express the finite element shape functions as a linear combination of the basis provided by the polynomial space:

    +\begin{align*}
   \varphi_j = \sum_k c_{jk} \tilde\varphi_j.
-\end{align*} +\end{align*}" src="form_1149.png"/>

    -

    These expansion coefficients $c_{jk}$ are typically computed in the constructors of derived classes. To facilitate this, this class at first (unless told otherwise, see below), assumes that the shape functions should be exactly the ones provided by the polynomial space. In the constructor of the derived class, one then typically has code of the form

    // Now compute the inverse node matrix, generating the correct
    +

    These expansion coefficients $c_{jk}$ are typically computed in the constructors of derived classes. To facilitate this, this class at first (unless told otherwise, see below), assumes that the shape functions should be exactly the ones provided by the polynomial space. In the constructor of the derived class, one then typically has code of the form

    // Now compute the inverse node matrix, generating the correct
    // basis functions from the raw ones. For a discussion of what
    // exactly happens here, see FETools::compute_node_matrix.
    @@ -511,7 +511,7 @@
    void invert(const FullMatrix< number2 > &M)
    FullMatrix< double > compute_node_matrix(const FiniteElement< dim, spacedim > &fe)
    -

    The FETools::compute_node_matrix() function explains in more detail what exactly it computes, and how; in any case, the result is that inverse_node_matrix now contains the expansion coefficients $c_{jk}$, and the fact that this block of code now sets the matrix to a non-zero size indicates to the functions of the current class that it should from then on use the expanded basis, $\{\varphi_j(\hat{\mathbf x})\}$, and no longer the original, "raw" basis $\{\tilde\varphi_j(\hat{\mathbf x})\}$ when asked for values or derivatives of shape functions.

    +

    The FETools::compute_node_matrix() function explains in more detail what exactly it computes, and how; in any case, the result is that inverse_node_matrix now contains the expansion coefficients $c_{jk}$, and the fact that this block of code now sets the matrix to a non-zero size indicates to the functions of the current class that it should from then on use the expanded basis, $\{\varphi_j(\hat{\mathbf x})\}$, and no longer the original, "raw" basis $\{\tilde\varphi_j(\hat{\mathbf x})\}$ when asked for values or derivatives of shape functions.

    In order for this scheme to work, it is important to ensure that the size of the inverse_node_matrix be zero at the time when FETools::compute_node_matrix() is called; thus, the call to this function cannot be inlined into the last line – the result of the call really does need to be stored in the temporary object M.

    Setting the transformation

    In most cases, vector valued basis functions must be transformed when mapped from the reference cell to the actual grid cell. These transformations can be selected from the set MappingKind and stored in mapping_kind. Therefore, each constructor should contain a line like:

    @@ -2308,7 +2308,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3002,7 +3002,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3110,7 +3110,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3420,9 +3420,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3457,11 +3457,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 2024-03-17 21:57:26.455129370 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 2024-03-17 21:57:26.463129419 +0000 @@ -700,11 +700,11 @@

    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    @@ -1773,17 +1773,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -1818,21 +1818,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -2708,7 +2708,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3402,7 +3402,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3510,7 +3510,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3820,9 +3820,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html 2024-03-17 21:57:26.603130285 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html 2024-03-17 21:57:26.611130333 +0000 @@ -837,11 +837,11 @@

    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    @@ -1910,17 +1910,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -1955,21 +1955,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -2724,7 +2724,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3418,7 +3418,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3526,7 +3526,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3836,9 +3836,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 2024-03-17 21:57:26.759131247 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 2024-03-17 21:57:26.763131272 +0000 @@ -642,11 +642,11 @@

    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    @@ -1715,17 +1715,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -1760,21 +1760,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -2712,7 +2712,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3406,7 +3406,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3514,7 +3514,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3824,9 +3824,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 2024-03-17 21:57:26.923132261 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 2024-03-17 21:57:26.927132285 +0000 @@ -484,7 +484,7 @@

    The constructor creates a TensorProductPolynomials object that includes the tensor product of LagrangeEquidistant polynomials of degree p. This TensorProductPolynomials object provides all values and derivatives of the shape functions. In case a quadrature rule is given, the constructor creates a TensorProductPolynomials object that includes the tensor product of Lagrange polynomials with the support points from points.

    Furthermore the constructor fills the interface_constraints, the prolongation (embedding) and the restriction matrices. These are implemented only up to a certain degree and may not be available for very high polynomial degree.

    Unit support point distribution and conditioning of interpolation

    -

    When constructing an FE_Q element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points xi are those points where the jth Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points get increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

    +

    When constructing an FE_Q element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points xi are those points where the jth Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points get increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

    The Gauss-Lobatto points in 1d include the end points 0 and +1 of the unit interval. The interior points are shifted towards the end points, which gives a denser point distribution close to the element boundary.

    If combined with Gauss-Lobatto quadrature, FE_Q based on the default support points gives diagonal mass matrices. This case is demonstrated in step-48. However, this element can be combined with arbitrary quadrature rules through the usual FEValues approach, including full Gauss quadrature. In the general case, the mass matrix is non-diagonal.

    Numbering of the degrees of freedom (DoFs)

    @@ -670,9 +670,9 @@ - @@ -685,9 +685,9 @@ - @@ -700,9 +700,9 @@ - @@ -715,9 +715,9 @@ - @@ -727,7 +727,7 @@

    -
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    $Q_2$ element, shape function 0

    +

    $Q_2$ element, shape function 0

    -

    $Q_2$ element, shape function 1

    +

    $Q_2$ element, shape function 1

    $Q_2$ element, shape function 2

    +

    $Q_2$ element, shape function 2

    -

    $Q_2$ element, shape function 3

    +

    $Q_2$ element, shape function 3

    $Q_2$ element, shape function 4

    +

    $Q_2$ element, shape function 4

    -

    $Q_2$ element, shape function 5

    +

    $Q_2$ element, shape function 5

    $Q_2$ element, shape function 6

    +

    $Q_2$ element, shape function 6

    -

    $Q_2$ element, shape function 7

    +

    $Q_2$ element, shape function 7

    $Q_2$ element, shape function 8

    +

    $Q_2$ element, shape function 8

    @@ -896,9 +896,9 @@
    -

    $Q_4$ element, shape function 0

    +

    $Q_4$ element, shape function 0

    -

    $Q_4$ element, shape function 1

    +

    $Q_4$ element, shape function 1

    @@ -911,9 +911,9 @@ -

    $Q_4$ element, shape function 2

    +

    $Q_4$ element, shape function 2

    -

    $Q_4$ element, shape function 3

    +

    $Q_4$ element, shape function 3

    @@ -926,9 +926,9 @@ -

    $Q_4$ element, shape function 4

    +

    $Q_4$ element, shape function 4

    -

    $Q_4$ element, shape function 5

    +

    $Q_4$ element, shape function 5

    @@ -941,9 +941,9 @@ -

    $Q_4$ element, shape function 6

    +

    $Q_4$ element, shape function 6

    -

    $Q_4$ element, shape function 7

    +

    $Q_4$ element, shape function 7

    @@ -956,9 +956,9 @@ -

    $Q_4$ element, shape function 8

    +

    $Q_4$ element, shape function 8

    -

    $Q_4$ element, shape function 9

    +

    $Q_4$ element, shape function 9

    @@ -971,9 +971,9 @@ -

    $Q_4$ element, shape function 10

    +

    $Q_4$ element, shape function 10

    -

    $Q_4$ element, shape function 11

    +

    $Q_4$ element, shape function 11

    @@ -986,9 +986,9 @@ -

    $Q_4$ element, shape function 12

    +

    $Q_4$ element, shape function 12

    -

    $Q_4$ element, shape function 13

    +

    $Q_4$ element, shape function 13

    @@ -1001,9 +1001,9 @@ -

    $Q_4$ element, shape function 14

    +

    $Q_4$ element, shape function 14

    -

    $Q_4$ element, shape function 15

    +

    $Q_4$ element, shape function 15

    @@ -1016,9 +1016,9 @@ -

    $Q_4$ element, shape function 16

    +

    $Q_4$ element, shape function 16

    -

    $Q_4$ element, shape function 17

    +

    $Q_4$ element, shape function 17

    @@ -1031,9 +1031,9 @@ -

    $Q_4$ element, shape function 18

    +

    $Q_4$ element, shape function 18

    -

    $Q_4$ element, shape function 19

    +

    $Q_4$ element, shape function 19

    @@ -1046,9 +1046,9 @@ -

    $Q_4$ element, shape function 20

    +

    $Q_4$ element, shape function 20

    -

    $Q_4$ element, shape function 21

    +

    $Q_4$ element, shape function 21

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 2024-03-17 21:57:27.079133224 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 2024-03-17 21:57:27.079133224 +0000 @@ -2287,17 +2287,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2332,21 +2332,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3587,7 +3587,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3695,7 +3695,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3974,9 +3974,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4011,11 +4011,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html 2024-03-17 21:57:27.227134138 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html 2024-03-17 21:57:27.235134188 +0000 @@ -485,17 +485,17 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Detailed Description

    template<int dim, int spacedim = dim>
    -class FE_Q_Bubbles< dim, spacedim >

    Implementation of a scalar Lagrange finite element $Q_p^+$ that yields the finite element space of continuous, piecewise polynomials of degree p in each coordinate direction plus some (non-normalized) bubble enrichment space spanned by the additional shape function $\varphi_j(\mathbf x)
+class FE_Q_Bubbles< dim, spacedim ></div><p>Implementation of a scalar Lagrange finite element <picture><source srcset=$Q_p^+$ that yields the finite element space of continuous, piecewise polynomials of degree p in each coordinate direction plus some (non-normalized) bubble enrichment space spanned by the additional shape function $\varphi_j(\mathbf x)
 = 2^{p-1}\left(x_j-\frac 12\right)^{p-1}
-\left[\prod_{i=0}^{dim-1}(x_i(1-x_i))\right]$. for $j=0,\ldots,dim-1$. If $p$ is one, then the first factor disappears and one receives the usual bubble function centered at the mid-point of the cell. Because these last shape functions have polynomial degree is $p+1$, the overall polynomial degree of the shape functions in the space described by this class is $p+1$.

    +\left[\prod_{i=0}^{dim-1}(x_i(1-x_i))\right]$" src="form_1157.png"/>. for $j=0,\ldots,dim-1$. If $p$ is one, then the first factor disappears and one receives the usual bubble function centered at the mid-point of the cell. Because these last shape functions have polynomial degree is $p+1$, the overall polynomial degree of the shape functions in the space described by this class is $p+1$.

    This class is realized using tensor product polynomials based on equidistant or given support points, in the same way as one can provide support points to the FE_Q class's constructors.

    For more information about the spacedim template parameter check the documentation of the FiniteElement class, or the one of Triangulation.

    -

    Due to the fact that the enrichments are small almost everywhere for large $p$, the condition number for the mass and stiffness matrix quickly increaseses with increasing $p$. Below you see a comparison with FE_Q(QGaussLobatto(p+1)) for dim=1.

    +

    Due to the fact that the enrichments are small almost everywhere for large $p$, the condition number for the mass and stiffness matrix quickly increaseses with increasing $p$. Below you see a comparison with FE_Q(QGaussLobatto(p+1)) for dim=1.

    -

    Therefore, this element should be used with care for $p>3$.

    +

    Therefore, this element should be used with care for $p>3$.

    Implementation

    The constructor creates a TensorProductPolynomials object that includes the tensor product of LagrangeEquidistant polynomials of degree p plus the bubble enrichments. This TensorProductPolynomialsBubbles object provides all values and derivatives of the shape functions. In case a quadrature rule is given, the constructor creates a TensorProductPolynomialsBubbles object that includes the tensor product of Lagrange polynomials with the support points from points and the bubble enrichments as defined above.

    Furthermore the constructor fills the interface_constrains, the prolongation (embedding) and the restriction matrices.

    @@ -714,11 +714,11 @@
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2497,17 +2497,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2542,21 +2542,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3704,7 +3704,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3812,7 +3812,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4091,9 +4091,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 2024-03-17 21:57:27.383135102 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 2024-03-17 21:57:27.383135102 +0000 @@ -885,11 +885,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2666,17 +2666,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2711,21 +2711,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3873,7 +3873,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3981,7 +3981,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4260,9 +4260,9 @@
    component_maskThe mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 2024-03-17 21:57:27.551136140 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 2024-03-17 21:57:27.555136164 +0000 @@ -520,7 +520,7 @@

    Numbering of the degrees of freedom (DoFs)

    The original ordering of the shape functions represented by the TensorProductPolynomials is a tensor product numbering. However, the shape functions on a cell are renumbered beginning with the shape functions whose support points are at the vertices, then on the line, on the quads, and finally (for 3d) on the hexes. To be explicit, these numberings are listed in the following:

    Q1 elements

    -

    The $Q_1^H$ element is of polynomial degree one and, consequently, is exactly the same as the $Q_1$ element in class FE_Q. In particular, the shape function are defined in the exact same way:

    +

    The $Q_1^H$ element is of polynomial degree one and, consequently, is exactly the same as the $Q_1$ element in class FE_Q. In particular, the shape function are defined in the exact same way:

    • 1d case:

      *      0-------1
      @@ -576,9 +576,9 @@
       
          
       
    - @@ -591,9 +591,9 @@ - +
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    $Q_1^H$ element, shape function 0

    +

    $Q_1^H$ element, shape function 0

    -

    $Q_1^H$ element, shape function 1

    +

    $Q_1^H$ element, shape function 1

    $Q_1^H$ element, shape function 2

    +

    $Q_1^H$ element, shape function 2

    -
    $Q_1^H$ element, shape function 3
    $Q_1^H$ element, shape function 3

    Q2 elements

      @@ -701,9 +701,9 @@
    -

    $Q_2^H$ element, shape function 0

    +

    $Q_2^H$ element, shape function 0

    -

    $Q_2^H$ element, shape function 1

    +

    $Q_2^H$ element, shape function 1

    @@ -716,9 +716,9 @@
    -

    $Q_2^H$ element, shape function 2

    +

    $Q_2^H$ element, shape function 2

    -

    $Q_2^H$ element, shape function 3

    +

    $Q_2^H$ element, shape function 3

    @@ -731,9 +731,9 @@ -

    $Q_2^H$ element, shape function 4

    +

    $Q_2^H$ element, shape function 4

    -

    $Q_2^H$ element, shape function 5

    +

    $Q_2^H$ element, shape function 5

    @@ -746,9 +746,9 @@ -

    $Q_2^H$ element, shape function 6

    +

    $Q_2^H$ element, shape function 6

    -

    $Q_2^H$ element, shape function 7

    +

    $Q_2^H$ element, shape function 7

    @@ -758,7 +758,7 @@

    -

    $Q_2^H$ element, shape function 8

    +

    $Q_2^H$ element, shape function 8

    @@ -789,9 +789,9 @@ -

    $Q_3^H$ element, shape function 0

    +

    $Q_3^H$ element, shape function 0

    -

    $Q_3^H$ element, shape function 1

    +

    $Q_3^H$ element, shape function 1

    @@ -804,9 +804,9 @@ -

    $Q_3^H$ element, shape function 2

    +

    $Q_3^H$ element, shape function 2

    -

    $Q_3^H$ element, shape function 3

    +

    $Q_3^H$ element, shape function 3

    @@ -819,9 +819,9 @@ -

    $Q_3^H$ element, shape function 4

    +

    $Q_3^H$ element, shape function 4

    -

    $Q_3^H$ element, shape function 5

    +

    $Q_3^H$ element, shape function 5

    @@ -834,9 +834,9 @@ -

    $Q_3^H$ element, shape function 6

    +

    $Q_3^H$ element, shape function 6

    -

    $Q_3^H$ element, shape function 7

    +

    $Q_3^H$ element, shape function 7

    @@ -849,9 +849,9 @@ -

    $Q_3^H$ element, shape function 8

    +

    $Q_3^H$ element, shape function 8

    -

    $Q_3^H$ element, shape function 9

    +

    $Q_3^H$ element, shape function 9

    @@ -864,9 +864,9 @@ -

    $Q_3^H$ element, shape function 10

    +

    $Q_3^H$ element, shape function 10

    -

    $Q_3^H$ element, shape function 11

    +

    $Q_3^H$ element, shape function 11

    @@ -879,9 +879,9 @@ -

    $Q_3^H$ element, shape function 12

    +

    $Q_3^H$ element, shape function 12

    -

    $Q_3^H$ element, shape function 13

    +

    $Q_3^H$ element, shape function 13

    @@ -894,9 +894,9 @@ -

    $Q_3^H$ element, shape function 14

    +

    $Q_3^H$ element, shape function 14

    -$Q_3^H$ element, shape function 15 +$Q_3^H$ element, shape function 15

    Q4 elements

    -

    The optional parameter adding determines, whether the result is stored in the vector $\mathbf w = \mathbf A \cdot \mathbf v$ or added to it $\mathbf w \mathrel{+}= \mathbf A \cdot \mathbf v$.

    +

    The optional parameter adding determines, whether the result is stored in the vector $\mathbf w = \mathbf A \cdot \mathbf v$ or added to it $\mathbf w \mathrel{+}= \mathbf A \cdot \mathbf v$.

    Note
    Source and destination must not be the same vector.
    The template with number2 only exists for compile-time compatibility with FullMatrix. Only the case number2 = number is implemented due to limitations in the underlying LAPACK interface. All other variants throw an error upon invocation.
    @@ -1309,8 +1309,8 @@ const Vector< number2 > & v&#href_anchor"memdoc"> -

    Adding Matrix-vector-multiplication $\mathbf w \mathrel{+}= \mathbf A \cdot
-\mathbf v$.

    +

    Adding Matrix-vector-multiplication $\mathbf w \mathrel{+}= \mathbf A \cdot
+\mathbf v$.

    See the documentation of vmult() for details on the implementation.

    Definition at line 1136 of file lapack_full_matrix.h.

    @@ -1365,7 +1365,7 @@ const bool adding = false&#href_anchor"memdoc">

    Transpose matrix-vector-multiplication.

    -

    The optional parameter adding determines, whether the result is stored in the vector $\mathbf w = \mathbf A^T \cdot \mathbf v$ or added to it $\mathbf w \mathrel{+}= \mathbf A^T \cdot \mathbf v$.

    +

    The optional parameter adding determines, whether the result is stored in the vector $\mathbf w = \mathbf A^T \cdot \mathbf v$ or added to it $\mathbf w \mathrel{+}= \mathbf A^T \cdot \mathbf v$.

    See the documentation of vmult() for details on the implementation.

    Definition at line 1149 of file lapack_full_matrix.h.

    @@ -1419,8 +1419,8 @@ const Vector< number2 > & v&#href_anchor"memdoc"> -

    Adding transpose matrix-vector-multiplication $\mathbf w \mathrel{+}=
-\mathbf A^T \cdot \mathbf v$.

    +

    Adding transpose matrix-vector-multiplication $\mathbf w \mathrel{+}=
+\mathbf A^T \cdot \mathbf v$.

    See the documentation of vmult() for details on the implementation.

    Definition at line 1163 of file lapack_full_matrix.h.

    @@ -1473,7 +1473,7 @@ const bool adding = false&#href_anchor"memdoc">

    Matrix-matrix-multiplication.

    -

    The optional parameter adding determines, whether the result is stored in the matrix $\mathbf C            = \mathbf A \cdot \mathbf B$ or added to it $\mathbf C \mathrel{+}= \mathbf A \cdot \mathbf B$.

    +

    The optional parameter adding determines, whether the result is stored in the matrix $\mathbf C            = \mathbf A \cdot \mathbf B$ or added to it $\mathbf C \mathrel{+}= \mathbf A \cdot \mathbf B$.

    Note
    It is assumed that A and B have compatible sizes and that C already has the right size.

    This function uses the BLAS function Xgemm.

    @@ -1532,7 +1532,7 @@ const bool adding = false&#href_anchor"memdoc">

    Matrix-matrix-multiplication using transpose of this.

    -

    The optional parameter adding determines, whether the result is stored in the matrix $\mathbf C = \mathbf A^T \cdot \mathbf B$ or added to it $\mathbf C \mathrel{+}= \mathbf A^T \cdot \mathbf B$.

    +

    The optional parameter adding determines, whether the result is stored in the matrix $\mathbf C = \mathbf A^T \cdot \mathbf B$ or added to it $\mathbf C \mathrel{+}= \mathbf A^T \cdot \mathbf B$.

    Note
    It is assumed that A and B have compatible sizes and that C already has the right size.
    This function uses the BLAS function Xgemm.
    @@ -1597,11 +1597,11 @@ const bool adding = false&#href_anchor"memdoc">

    Matrix-matrix-multiplication using transpose of this and a diagonal vector V.

    -

    If the adding=false then the result is stored in the matrix $\mathbf C = \mathbf A^T \cdot \rm{diag}(\mathbf V) \cdot \mathbf B$ otherwise it is added $\mathbf C \mathrel{+}= \mathbf A^T \cdot
-\rm{diag}(\mathbf V) \cdot \mathbf B$.

    +

    If the adding=false then the result is stored in the matrix $\mathbf C = \mathbf A^T \cdot \rm{diag}(\mathbf V) \cdot \mathbf B$ otherwise it is added $\mathbf C \mathrel{+}= \mathbf A^T \cdot
+\rm{diag}(\mathbf V) \cdot \mathbf B$.

    Note
    It is assumed that A, B and V have compatible sizes and that C already has the right size.
    -This function is not provided by LAPACK. The function first forms $\rm{diag}(\mathbf V) \cdot \mathbf B$ product and then uses the Xgemm function.
    +This function is not provided by LAPACK. The function first forms $\rm{diag}(\mathbf V) \cdot \mathbf B$ product and then uses the Xgemm function.

    Definition at line 1028 of file lapack_full_matrix.cc.

    @@ -1630,7 +1630,7 @@ const bool adding = false&#href_anchor"memdoc">

    Matrix-matrix-multiplication using transpose of B.

    -

    The optional parameter adding determines, whether the result is stored in the matrix $\mathbf C = \mathbf A \cdot \mathbf B^T$ or added to it $\mathbf C \mathrel{+}= \mathbf A \cdot \mathbf B^T$.

    +

    The optional parameter adding determines, whether the result is stored in the matrix $\mathbf C = \mathbf A \cdot \mathbf B^T$ or added to it $\mathbf C \mathrel{+}= \mathbf A \cdot \mathbf B^T$.

    Note
    It is assumed that A and B have compatible sizes and that C already has the right size.
    This function uses the BLAS function Xgemm.
    @@ -1690,7 +1690,7 @@ const bool adding = false&#href_anchor"memdoc">

    Matrix-matrix-multiplication using transpose of this and B.

    -

    The optional parameter adding determines, whether the result is stored in the matrix $\mathbf C = \mathbf A^T \cdot \mathbf B^T$ or added to it $\mathbf C \mathrel{+}= \mathbf A^T \cdot \mathbf B^T$.

    +

    The optional parameter adding determines, whether the result is stored in the matrix $\mathbf C = \mathbf A^T \cdot \mathbf B^T$ or added to it $\mathbf C \mathrel{+}= \mathbf A^T \cdot \mathbf B^T$.

    Note
    It is assumed that A and B have compatible sizes and that C already has the right size.
    This function uses the BLAS function Xgemm.
    @@ -1768,8 +1768,8 @@
    -

    Scale rows of this matrix by V . This is equivalent to premultiplication with a diagonal matrix $\mathbf A\leftarrow {\rm diag}(\mathbf V)\mathbf
-A$.

    +

    Scale rows of this matrix by V . This is equivalent to premultiplication with a diagonal matrix $\mathbf A\leftarrow {\rm diag}(\mathbf V)\mathbf
+A$.

    Definition at line 1111 of file lapack_full_matrix.cc.

    @@ -1836,13 +1836,13 @@
    -

    Estimate the reciprocal of the condition number $1/k(\mathbf A)$ in $L_1$ norm ( $1/(||\mathbf A||_1 \, ||\mathbf A^{-1}||_1)$) of a symmetric positive definite matrix using Cholesky factorization. This function can only be called if the matrix is already factorized.

    -
    Note
    The condition number $k(\mathbf A)$ can be used to estimate the numerical error related to the matrix inversion or the solution of the system of linear algebraic equations as error = std::numeric_limits<Number>::epsilon * k. Alternatively one can get the number of accurate digits std::floor(std::log10(k)).
    +

    Estimate the reciprocal of the condition number $1/k(\mathbf A)$ in $L_1$ norm ( $1/(||\mathbf A||_1 \, ||\mathbf A^{-1}||_1)$) of a symmetric positive definite matrix using Cholesky factorization. This function can only be called if the matrix is already factorized.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 2024-03-17 21:57:31.591161094 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 2024-03-17 21:57:31.591161094 +0000 @@ -1103,7 +1103,7 @@
    -

    Return the square of the $l_2$-norm.

    +

    Return the square of the $l_2$-norm.

    Definition at line 544 of file cuda_vector.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 2024-03-17 21:57:31.659161514 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 2024-03-17 21:57:31.667161564 +0000 @@ -314,7 +314,7 @@

    Detailed Description

    template<typename Number>
    -class LinearAlgebra::ReadWriteVector< Number >

    ReadWriteVector is intended to represent vectors in ${\mathbb R}^N$ for which it stores all or a subset of elements. The latter case in important in parallel computations, where $N$ may be so large that no processor can actually all elements of a solution vector, but where this is also not necessary: one typically only has to store the values of degrees of freedom that live on cells that are locally owned plus potentially those degrees of freedom that live on ghost cells.

    +class LinearAlgebra::ReadWriteVector< Number >

    ReadWriteVector is intended to represent vectors in ${\mathbb R}^N$ for which it stores all or a subset of elements. The latter case in important in parallel computations, where $N$ may be so large that no processor can actually all elements of a solution vector, but where this is also not necessary: one typically only has to store the values of degrees of freedom that live on cells that are locally owned plus potentially those degrees of freedom that live on ghost cells.

    This class allows to access individual elements to be read or written. However, it does not allow global operations such as taking the norm. ReadWriteVector can be used to read and write elements in vectors derived from VectorSpaceVector such as TrilinosWrappers::MPI::Vector and PETScWrappers::MPI::Vector.

    Storing elements

    Most of the time, one will simply read from or write into a vector of the current class using the global numbers of these degrees of freedom. This is done using operator()() or operator[]() which call global_to_local() to transform the global index into a local one. In such cases, it is clear that one can only access elements of the vector that the current object indeed stores.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 2024-03-17 21:57:31.755162108 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 2024-03-17 21:57:31.759162132 +0000 @@ -1402,7 +1402,7 @@
    -

    $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    +

    $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    @@ -1704,7 +1704,7 @@ const bool symmetric = false&#href_anchor"memdoc"> -

    Calculate the scalar product between each block of this vector and V and store the result in a full matrix matrix. This function computes the result by forming $A_{ij}=U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element!) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that inner product results in a square symmetric matrix and almost half of the scalar products can be avoided.

    +

    Calculate the scalar product between each block of this vector and V and store the result in a full matrix matrix. This function computes the result by forming $A_{ij}=U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element!) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that inner product results in a square symmetric matrix and almost half of the scalar products can be avoided.

    Obviously, this function can only be used if all blocks of both vectors are of the same size.

    Note
    Internally, a single global reduction will be called to accumulate scalar product between locally owned degrees of freedom.
    @@ -1734,7 +1734,7 @@ const bool symmetric = false&#href_anchor"memdoc"> -

    Calculate the scalar product between each block of this vector and V using a metric tensor matrix. This function computes the result of $ \sum_{ij} A^{ij} U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that $U_i \cdot V_j$ and $A^{ij}$ are symmetric matrices and almost half of the scalar products can be avoided.

    +

    Calculate the scalar product between each block of this vector and V using a metric tensor matrix. This function computes the result of $ \sum_{ij} A^{ij} U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that $U_i \cdot V_j$ and $A^{ij}$ are symmetric matrices and almost half of the scalar products can be avoided.

    Obviously, this function can only be used if all blocks of both vectors are of the same size.

    Note
    Internally, a single global reduction will be called to accumulate the scalar product between locally owned degrees of freedom.
    @@ -1769,7 +1769,7 @@ const Number b = Number(1.)&#href_anchor"memdoc"> -

    Set each block of this vector as follows: $V^i = s V^i + b \sum_{j} U_j A^{ji}$ where $V^i$ and $U_j$ indicate the $i$th block (not element) of $V$ and the $j$th block of $U$, respectively.

    +

    Set each block of this vector as follows: $V^i = s V^i + b \sum_{j} U_j A^{ji}$ where $V^i$ and $U_j$ indicate the $i$th block (not element) of $V$ and the $j$th block of $U$, respectively.

    Obviously, this function can only be used if all blocks of both vectors are of the same size.

    @@ -2055,7 +2055,7 @@
    -

    Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

    +

    Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

    Implements LinearAlgebra::VectorSpaceVector< Number >.

    @@ -2077,7 +2077,7 @@
    -

    Return the square of the $l_2$ norm of the vector.

    +

    Return the square of the $l_2$ norm of the vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 2024-03-17 21:57:31.851162701 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 2024-03-17 21:57:31.859162750 +0000 @@ -1034,7 +1034,7 @@ const MPI_Comm comm_sm = MPI_COMM_SELF&#href_anchor"memdoc">

    Initialize vector with local_size locally-owned and ghost_size ghost degrees of freedoms.

    The optional argument comm_sm, which consists of processes on the same shared-memory domain, allows users have read-only access to both locally-owned and ghost values of processes combined in the shared-memory communicator. See the general documentation of this class for more information about this argument.

    -
    Note
    In the created underlying partitioner, the local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively. Setting the ghost_size variable to an appropriate value provides memory space for the ghost data in a vector's memory allocation as and allows access to it via local_element(). However, the associated global indices must be handled externally in this case.
    +
    Note
    In the created underlying partitioner, the local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively. Setting the ghost_size variable to an appropriate value provides memory space for the ghost data in a vector's memory allocation as and allows access to it via local_element(). However, the associated global indices must be handled externally in this case.
    @@ -1199,7 +1199,7 @@

    Initiates communication for the compress() function with non- blocking communication. This function does not wait for the transfer to finish, in order to allow for other computations during the time it takes until all data arrives.

    Before the data is actually exchanged, the function must be followed by a call to compress_finish().

    -

    In case this function is called for more than one vector before compress_finish() is invoked, it is mandatory to specify a unique communication channel to each such call, in order to avoid several messages with the same ID that will corrupt this operation. Any communication channel less than 100 is a valid value (in particular, the range $[100, 200)$ is reserved for LinearAlgebra::distributed::BlockVector).

    +

    In case this function is called for more than one vector before compress_finish() is invoked, it is mandatory to specify a unique communication channel to each such call, in order to avoid several messages with the same ID that will corrupt this operation. Any communication channel less than 100 is a valid value (in particular, the range $[100, 200)$ is reserved for LinearAlgebra::distributed::BlockVector).

    @@ -1244,7 +1244,7 @@

    Initiates communication for the update_ghost_values() function with non-blocking communication. This function does not wait for the transfer to finish, in order to allow for other computations during the time it takes until all data arrives.

    Before the data is actually exchanged, the function must be followed by a call to update_ghost_values_finish().

    -

    In case this function is called for more than one vector before update_ghost_values_finish() is invoked, it is mandatory to specify a unique communication channel to each such call, in order to avoid several messages with the same ID that will corrupt this operation. Any communication channel less than 100 is a valid value (in particular, the range $[100, 200)$ is reserved for LinearAlgebra::distributed::BlockVector).

    +

    In case this function is called for more than one vector before update_ghost_values_finish() is invoked, it is mandatory to specify a unique communication channel to each such call, in order to avoid several messages with the same ID that will corrupt this operation. Any communication channel less than 100 is a valid value (in particular, the range $[100, 200)$ is reserved for LinearAlgebra::distributed::BlockVector).

    @@ -1953,7 +1953,7 @@
    -

    Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

    +

    Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

    Implements LinearAlgebra::VectorSpaceVector< Number >.

    @@ -1975,7 +1975,7 @@
    -

    Return the square of the $l_2$ norm of the vector.

    +

    Return the square of the $l_2$ norm of the vector.

    @@ -2720,7 +2720,7 @@
    -

    $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    +

    $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html 2024-03-17 21:57:31.899162997 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html 2024-03-17 21:57:31.907163046 +0000 @@ -224,7 +224,7 @@
    LinearOperator::reinit_range_vector
    std::function< void(Range &v, bool omit_zeroing_entries) reinit_range_vector)
    Definition linear_operator.h:302

    that store the knowledge how to initialize (resize + internal data structures) an arbitrary vector of the Range and Domain space.

    The primary purpose of this class is to provide syntactic sugar for complex matrix-vector operations and free the user from having to create, set up and handle intermediate storage locations by hand.

    -

    As an example consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possible different) matrices. In order to construct a LinearOperator op that stores the knowledge of this operation, one can write:

    +

    As an example consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possible different) matrices. In order to construct a LinearOperator op that stores the knowledge of this operation, one can write:

    #href_anchor"code" href="linear__operator__tools_8h.html">deal.II/lac/linear_operator_tools.h>
    ::SparseMatrix<double> A, B, C;
    /usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html 2024-03-17 21:57:31.955163343 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html 2024-03-17 21:57:31.959163367 +0000 @@ -204,11 +204,11 @@

    In the most essential use of manifolds, manifold descriptions are used to create a "point between other points". For example, when a triangulation creates a new vertex on a cell, face, or edge, it determines the new vertex' coordinates through the following function call:

    ...
    Point<spacedim> new_vertex = manifold.get_new_point (points,weights);
    ...
    -

    Here, points is a collection of points in spacedim dimension, and a collection of corresponding weights. The points in this context will then be the vertices of the cell, face, or edge, and the weights are typically one over the number of points when a new midpoint of the cell, face, or edge is needed. Derived classes then will implement the Manifold::get_new_point() function in a way that computes the location of this new point. In the simplest case, for example in the FlatManifold class, the function simply computes the arithmetic average (with given weights) of the given points. However, other classes do something differently; for example, the SphericalManifold class, which is used to describe domains that form (part of) the sphere, will ensure that, given the two vertices of an edge at the boundary, the new returned point will lie on the grand circle that connects the two points, rather than choosing a point that is half-way between the two points in ${\mathbb R}^d$.

    +

    Here, points is a collection of points in spacedim dimension, and a collection of corresponding weights. The points in this context will then be the vertices of the cell, face, or edge, and the weights are typically one over the number of points when a new midpoint of the cell, face, or edge is needed. Derived classes then will implement the Manifold::get_new_point() function in a way that computes the location of this new point. In the simplest case, for example in the FlatManifold class, the function simply computes the arithmetic average (with given weights) of the given points. However, other classes do something differently; for example, the SphericalManifold class, which is used to describe domains that form (part of) the sphere, will ensure that, given the two vertices of an edge at the boundary, the new returned point will lie on the grand circle that connects the two points, rather than choosing a point that is half-way between the two points in ${\mathbb R}^d$.

    Note
    Unlike almost all other cases in the library, we here interpret the points to be in real space, not on the reference cell.

    Manifold::get_new_point() has a default implementation that can simplify this process somewhat: Internally, the function calls the Manifold::get_intermediate_point() to compute pair-wise intermediate points. Internally the Manifold::get_intermediate_point() calls the Manifold::project_to_manifold() function after computing the convex combination of the given points. This allows derived classes to only overload Manifold::project_to_manifold() for simple situations. This is often useful when describing manifolds that are embedded in higher dimensional space, e.g., the surface of a sphere. In those cases, the desired new point may be computed simply by the (weighted) average of the provided points, projected back out onto the sphere.

    Common use case: Computing tangent vectors

    -

    The second use of this class is in computing directions on domains and boundaries. For example, we may need to compute the normal vector to a face in order to impose the no-flow boundary condition $\mathbf u \cdot \mathbf n = 0$ (see the VectorTools::compute_no_normal_flux_constraints() as an example). Similarly, we may need normal vectors in the computation of the normal component of the gradient of the numerical solution in order to compute the jump in the gradient of the solution in error estimators (see, for example, the KellyErrorEstimator class).

    +

    The second use of this class is in computing directions on domains and boundaries. For example, we may need to compute the normal vector to a face in order to impose the no-flow boundary condition $\mathbf u \cdot \mathbf n = 0$ (see the VectorTools::compute_no_normal_flux_constraints() as an example). Similarly, we may need normal vectors in the computation of the normal component of the gradient of the numerical solution in order to compute the jump in the gradient of the solution in error estimators (see, for example, the KellyErrorEstimator class).

    To make this possible, the Manifold class provides a member function (to be implemented by derived classes) that computes a "vector tangent to the manifold at one point, in direction of another point" via the Manifold::get_tangent_vector() function. For example, in 2d, one would use this function with the two vertices of an edge at the boundary to compute a "tangential" vector along the edge, and then get the normal vector by rotation by 90 degrees. In 3d, one would compute the two vectors "tangential" to the two edges of a boundary face adjacent to a boundary vertex, and then take the cross product of these two to obtain a vector normal to the boundary.

    For reasons that are more difficult to understand, these direction vectors are normalized in a very specific way, rather than to have unit norm. See the documentation of Manifold::get_tangent_vector(), as well as below, for more information.

    @@ -216,11 +216,11 @@

    A unified description

    The "real" way to understand what this class does is to see it in the framework of differential geometry. More specifically, differential geometry is fundamentally based on the assumption that two sufficiently close points are connected via a line of "shortest distance". This line is called a "geodesic", and it is selected from all other lines that connect the two points by the property that it is shortest if distances are measured in terms of the "metric" that describes a manifold. To give examples, recall that the geodesics of a flat manifold (implemented in the FlatManifold class) are simply the straight lines connecting two points, whereas for spherical manifolds (see the SphericalManifold class) geodesics between two points of same distance are the grand circles, and are in general curved lines when connecting two lines of different distance from the origin.

    In the following discussion, and for the purposes of implementing the current class, the concept of "metrics" that is so fundamental to differential geometry is no longer of great importance to us. Rather, everything can simply be described by postulating the existence of geodesics connecting points on a manifold.

    -

    Given geodesics, the operations discussed in the previous two sections can be described in a more formal way. In essence, they rely on the fact that we can assume that a geodesic is parameterized by a "time" like variable $t$ so that $\mathbf s(t)$ describes the curve and so that $\mathbf s(0)$ is the location of the first and $\mathbf s(1)$ the location of the second point. Furthermore, $\mathbf s(t)$ traces out the geodesic at constant speed, covering equal distance in equal time (as measured by the metric). Note that this parameterization uses time, not arc length to denote progress along the geodesic.

    -

    In this picture, computing a mid-point between points $\mathbf x_1$ and $\mathbf x_2$, with weights $w_1$ and $w_2=1-w_1$, simply requires computing the point $\mathbf s(w_1)$. Computing a new point as a weighted average of more than two points can be done by considering pairwise geodesics, finding suitable points on the geodetic between the first two points, then on the geodetic between this new point and the third given point, etc.

    -

    Likewise, the "tangential" vector described above is simply the velocity vector, $\mathbf s'(t)$, evaluated at one of the end points of a geodesic (i.e., at $t=0$ or $t=1$). In the case of a flat manifold, the geodesic is simply the straight line connecting two points, and the velocity vector is just the connecting vector in that case. On the other hand, for two points on a spherical manifold, the geodesic is a grand circle, and the velocity vector is tangent to the spherical surface.

    -

    Note that if we wanted to, we could use this to compute the length of the geodesic that connects two points $\mathbf x_1$ and $\mathbf x_2$ by computing $\int_0^1 \|\mathbf s'(t)\| dt$ along the geodesic that connects them, but this operation will not be of use to us in practice. One could also conceive computing the direction vector using the "new point" operation above, using the formula $\mathbf s'(0)=\lim_{w\rightarrow 0}
-\frac{\mathbf s(w)-\mathbf s(0)}{w}$ where all we need to do is compute the new point $\mathbf s(w)$ with weights $w$ and $1-w$ along the geodesic connecting $\mathbf x_1$ and $\mathbf x_2$. The default implementation of the function does this, by evaluating the quotient for a small but finite weight $w$. In practice, however, it is almost always possible to explicitly compute the direction vector, i.e., without the need to numerically approximate the limit process, and derived classes should do so.

    +

    Given geodesics, the operations discussed in the previous two sections can be described in a more formal way. In essence, they rely on the fact that we can assume that a geodesic is parameterized by a "time" like variable $t$ so that $\mathbf s(t)$ describes the curve and so that $\mathbf s(0)$ is the location of the first and $\mathbf s(1)$ the location of the second point. Furthermore, $\mathbf s(t)$ traces out the geodesic at constant speed, covering equal distance in equal time (as measured by the metric). Note that this parameterization uses time, not arc length to denote progress along the geodesic.

    +

    In this picture, computing a mid-point between points $\mathbf x_1$ and $\mathbf x_2$, with weights $w_1$ and $w_2=1-w_1$, simply requires computing the point $\mathbf s(w_1)$. Computing a new point as a weighted average of more than two points can be done by considering pairwise geodesics, finding suitable points on the geodetic between the first two points, then on the geodetic between this new point and the third given point, etc.

    +

    Likewise, the "tangential" vector described above is simply the velocity vector, $\mathbf s'(t)$, evaluated at one of the end points of a geodesic (i.e., at $t=0$ or $t=1$). In the case of a flat manifold, the geodesic is simply the straight line connecting two points, and the velocity vector is just the connecting vector in that case. On the other hand, for two points on a spherical manifold, the geodesic is a grand circle, and the velocity vector is tangent to the spherical surface.

    +

    Note that if we wanted to, we could use this to compute the length of the geodesic that connects two points $\mathbf x_1$ and $\mathbf x_2$ by computing $\int_0^1 \|\mathbf s'(t)\| dt$ along the geodesic that connects them, but this operation will not be of use to us in practice. One could also conceive computing the direction vector using the "new point" operation above, using the formula $\mathbf s'(0)=\lim_{w\rightarrow 0}
+\frac{\mathbf s(w)-\mathbf s(0)}{w}$ where all we need to do is compute the new point $\mathbf s(w)$ with weights $w$ and $1-w$ along the geodesic connecting $\mathbf x_1$ and $\mathbf x_2$. The default implementation of the function does this, by evaluating the quotient for a small but finite weight $w$. In practice, however, it is almost always possible to explicitly compute the direction vector, i.e., without the need to numerically approximate the limit process, and derived classes should do so.

    Definition at line 286 of file manifold.h.

    Member Typedef Documentation

    @@ -648,11 +648,11 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf
-x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), then the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

    -

    While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

    -

    The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf
-s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.

    +

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf
+x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), then the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

    +

    While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

    +

    The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf
+s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 2024-03-17 21:57:32.019163738 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 2024-03-17 21:57:32.023163763 +0000 @@ -229,84 +229,84 @@ class Mapping< dim, spacedim >

    Abstract base class for mapping classes.

    This class declares the interface for the functionality to describe mappings from the reference (unit) cell to a cell in real space, as well as for filling the information necessary to use the FEValues, FEFaceValues, and FESubfaceValues classes. Concrete implementations of these interfaces are provided in derived classes.

    Mathematics of the mapping

    -

    The mapping is a transformation $\mathbf x = \mathbf F_K(\hat{\mathbf  x})$ which maps points $\hat{\mathbf x}$ in the reference cell $[0,1]^\text{dim}$ to points $\mathbf x$ in the actual grid cell $K\subset{\mathbb R}^\text{spacedim}$. Many of the applications of such mappings require the Jacobian of this mapping, $J(\hat{\mathbf x}) =
-\hat\nabla {\mathbf F}_K(\hat{\mathbf  x})$. For instance, if dim=spacedim=2, we have

    -\[
+<p>The mapping is a transformation <picture><source srcset=$\mathbf x = \mathbf F_K(\hat{\mathbf  x})$ which maps points $\hat{\mathbf x}$ in the reference cell $[0,1]^\text{dim}$ to points $\mathbf x$ in the actual grid cell $K\subset{\mathbb R}^\text{spacedim}$. Many of the applications of such mappings require the Jacobian of this mapping, $J(\hat{\mathbf x}) =
+\hat\nabla {\mathbf F}_K(\hat{\mathbf  x})$. For instance, if dim=spacedim=2, we have

    +\[
 J(\hat{\mathbf  x}) = \left(\begin{matrix}
 \frac{\partial x}{\partial \hat x} & \frac{\partial x}{\partial \hat y}
 \\
 \frac{\partial y}{\partial \hat x} & \frac{\partial y}{\partial \hat y}
 \end{matrix}\right)
-\] +\]" src="form_1265.png"/>

    Mapping of scalar functions

    The shape functions of scalar finite elements are typically defined on a reference cell and are then simply mapped according to the rule

    -\[
+<picture><source srcset=\[
 \varphi(\mathbf x) = \varphi\bigl(\mathbf F_K(\hat{\mathbf  x})\bigr)
 = \hat \varphi(\hat{\mathbf  x}).
-\] +\]" src="form_1266.png"/>

    Mapping of integrals

    -

    Using simply a change of variables, integrals of scalar functions over a cell $K$ can be expressed as an integral over the reference cell $\hat K$. Specifically, The volume form $d\hat x$ is transformed so that

    -\[
+<p>Using simply a change of variables, integrals of scalar functions over a cell <picture><source srcset=$K$ can be expressed as an integral over the reference cell $\hat K$. Specifically, The volume form $d\hat x$ is transformed so that

    +\[
  \int_K u(\mathbf x)\,dx = \int_{\hat K} \hat
 u(\hat{\mathbf  x}) \left|\text{det}J(\hat{\mathbf  x})\right|
 \,d\hat x.
-\] +\]" src="form_1268.png"/>

    In expressions where such integrals are approximated by quadrature, this then leads to terms of the form

    -\[
+<picture><source srcset=\[
  \int_K u(\mathbf x)\,dx
  \approx
  \sum_{q}
  \hat u(\hat{\mathbf  x}_q)
  \underbrace{\left|\text{det}J(\hat{\mathbf  x}_q)\right| w_q}_{=:
 \text{JxW}_q}.
-\] +\]" src="form_1269.png"/>

    -

    Here, the weights $\text{JxW}_q$ of each quadrature point (where JxW mnemonically stands for Jacobian times Quadrature Weights) take the role of the $dx$ in the original integral. Consequently, they appear in all code that computes integrals approximated by quadrature, and are accessed by FEValues::JxW().

    +

    Here, the weights $\text{JxW}_q$ of each quadrature point (where JxW mnemonically stands for Jacobian times Quadrature Weights) take the role of the $dx$ in the original integral. Consequently, they appear in all code that computes integrals approximated by quadrature, and are accessed by FEValues::JxW().

    Todo
    Document what happens in the codimension-1 case.

    Mapping of vector fields, differential forms and gradients of vector fields

    The transformation of vector fields or differential forms (gradients of scalar functions) $\mathbf v$, and gradients of vector fields $\mathbf T$ follows the general form

    -\[
+<picture><source srcset=\[
 \mathbf v(\mathbf x) = \mathbf A(\hat{\mathbf  x})
 \hat{\mathbf  v}(\hat{\mathbf  x}),
 \qquad
 \mathbf T(\mathbf x) = \mathbf A(\hat{\mathbf  x})
 \hat{\mathbf  T}(\hat{\mathbf  x}) \mathbf B(\hat{\mathbf  x}).
-\] +\]" src="form_1272.png"/>

    The differential forms A and B are determined by the kind of object being transformed. These transformations are performed through the transform() functions, and the type of object being transformed is specified by their MappingKind argument. See the documentation there for possible choices.

    Derivatives of the mapping

    -

    Some applications require the derivatives of the mapping, of which the first order derivative is the mapping Jacobian, $J_{iJ}(\hat{\mathbf
-x})=\frac{\partial x_i}{\partial \hat x_J}$, described above. Higher order derivatives of the mapping are similarly defined, for example the Jacobian derivative, $\hat H_{iJK}(\hat{\mathbf  x}) = \frac{\partial^2
-x_i}{\partial \hat x_J \partial \hat x_K}$, and the Jacobian second derivative, $\hat K_{iJKL}(\hat{\mathbf  x}) = \frac{\partial^3
-x_i}{\partial \hat x_J \partial \hat x_K \partial \hat x_L}$. It is also useful to define the "pushed-forward" versions of the higher order derivatives: the Jacobian pushed-forward derivative, $H_{ijk}(\hat{\mathbf
+<p>Some applications require the derivatives of the mapping, of which the first order derivative is the mapping Jacobian, <picture><source srcset=$J_{iJ}(\hat{\mathbf
+x})=\frac{\partial x_i}{\partial \hat x_J}$, described above. Higher order derivatives of the mapping are similarly defined, for example the Jacobian derivative, $\hat H_{iJK}(\hat{\mathbf  x}) = \frac{\partial^2
+x_i}{\partial \hat x_J \partial \hat x_K}$, and the Jacobian second derivative, $\hat K_{iJKL}(\hat{\mathbf  x}) = \frac{\partial^3
+x_i}{\partial \hat x_J \partial \hat x_K \partial \hat x_L}$. It is also useful to define the "pushed-forward" versions of the higher order derivatives: the Jacobian pushed-forward derivative, $H_{ijk}(\hat{\mathbf
 x}) = \frac{\partial^2 x_i}{\partial \hat x_J \partial \hat
-x_K}(J_{jJ})^{-1}(J_{kK})^{-1}$, and the Jacobian pushed-forward second derivative, $K_{ijkl}(\hat{\mathbf  x}) = \frac{\partial^3 x_i}{\partial
+x_K}(J_{jJ})^{-1}(J_{kK})^{-1}$, and the Jacobian pushed-forward second derivative, $K_{ijkl}(\hat{\mathbf  x}) = \frac{\partial^3 x_i}{\partial
 \hat x_J \partial \hat x_K \partial \hat
-x_L}(J_{jJ})^{-1}(J_{kK})^{-1}(J_{lL})^{-1}$. These pushed-forward versions can be used to compute the higher order derivatives of functions defined on the reference cell with respect to the real cell coordinates. For instance, the Jacobian derivative with respect to the real cell coordinates is given by:

    +x_L}(J_{jJ})^{-1}(J_{kK})^{-1}(J_{lL})^{-1}$" src="form_1277.png"/>. These pushed-forward versions can be used to compute the higher order derivatives of functions defined on the reference cell with respect to the real cell coordinates. For instance, the Jacobian derivative with respect to the real cell coordinates is given by:

    -\[
+<picture><source srcset=\[
 \frac{\partial}{\partial x_j}\left[J_{iJ}(\hat{\mathbf  x})\right] =
 H_{ikn}(\hat{\mathbf  x})J_{nJ}(\hat{\mathbf  x}),
-\] +\]" src="form_1278.png"/>

    and the derivative of the Jacobian inverse with respect to the real cell coordinates is similarly given by:

    -\[
+<picture><source srcset=\[
 \frac{\partial}{\partial x_j}\left[\left(J_{iJ}(\hat{\mathbf
 x})\right)^{-1}\right] = -H_{nik}(\hat{\mathbf  x})\left(J_{nJ}(\hat{\mathbf
 x})\right)^{-1}.
-\] +\]" src="form_1279.png"/>

    In a similar fashion, higher order derivatives, with respect to the real cell coordinates, of functions defined on the reference cell can be defined using the Jacobian pushed-forward higher-order derivatives. For example, the derivative, with respect to the real cell coordinates, of the Jacobian pushed-forward derivative is given by:

    -\[
+<picture><source srcset=\[
 \frac{\partial}{\partial x_l}\left[H_{ijk}(\hat{\mathbf  x})\right] =
 K_{ijkl}(\hat{\mathbf  x}) -H_{mjl}(\hat{\mathbf  x})H_{imk}(\hat{\mathbf
 x})-H_{mkl}(\hat{\mathbf  x})H_{imj}(\hat{\mathbf  x}).
-\] +\]" src="form_1280.png"/>

    References

    A general publication on differential geometry and finite elements is the survey

      @@ -987,10 +987,10 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

    Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

    -

    Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

    +

    Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

    The information computed by this function is used to fill the various member variables of the output argument of this function. Which of the member variables of that structure should be filled is determined by the update flags stored in the Mapping::InternalDataBase object passed to this function.

    An extensive discussion of the interaction between this function and FEValues can be found in the How Mapping, FiniteElement, and FEValues work together documentation module.

    @@ -1249,37 +1249,37 @@

    The mapping kinds currently implemented by derived classes are:

    @@ -1331,21 +1331,21 @@
    -

    Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

    -

    Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

    Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

    @@ -1031,21 +1031,21 @@

    The mapping kinds currently implemented by derived classes are:

    Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

    In the case when dim=spacedim the previous formula reduces to

    -\[J^{\dagger} = J^{-1}\] +\[J^{\dagger} = J^{-1}\]

    Parameters
    @@ -1097,40 +1097,40 @@
    -

    Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
-\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
-u}_I$, with $\mathbf u_i$ a vector field.

    +

    Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
+\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
+u}_I$, with $\mathbf u_i$ a vector field.

    The mapping kinds currently implemented by derived classes are:

    -

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    +

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2063,7 +2063,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2117,7 +2117,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html 2024-03-17 21:57:33.283171545 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html 2024-03-17 21:57:33.291171595 +0000 @@ -160,11 +160,11 @@

    Detailed Description

    template<int dim>
    -class NonMatching::FEInterfaceValues< dim >

    This class is intended to facilitate assembling interface terms on faces in immersed (in the sense of cut) finite element methods. These types of terms occur mainly in cut discontinuous Galerkin methods. This class works analogously to NonMatching::FEValues. The domain is assumed to be described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$, and this class assumes that we want to integrate over two different regions of each face, $F$:

    -\[
+class NonMatching::FEInterfaceValues< dim ></div><p>This class is intended to facilitate assembling interface terms on faces in immersed (in the sense of cut) finite element methods. These types of terms occur mainly in cut discontinuous Galerkin methods. This class works analogously to <a class=NonMatching::FEValues. The domain is assumed to be described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$, and this class assumes that we want to integrate over two different regions of each face, $F$:

    +\[
 N = \{x \in F : \psi(x) < 0 \}, \\
 P = \{x \in F : \psi(x) > 0 \},
-\] +\]" src="form_2029.png"/>

    which we as before refer to as the "inside" and "outside" regions of the face.

    @@ -198,7 +198,7 @@
    }
    void reinit(const CellIteratorType &cell, const unsigned int face_no, const unsigned int sub_face_no, const CellNeighborIteratorType &cell_neighbor, const unsigned int face_no_neighbor, const unsigned int sub_face_no_neighbor)
    -

    To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. The immersed quadrature rules are only generated if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEInterfaceValues object created with a quadrature over the reference cell: $[0, 1]^{dim-1}$.

    +

    To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. The immersed quadrature rules are only generated if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEInterfaceValues object created with a quadrature over the reference cell: $[0, 1]^{dim-1}$.

    Definition at line 437 of file fe_values.h.

    Member Typedef Documentation

    @@ -352,7 +352,7 @@ - + @@ -454,7 +454,7 @@
    mapping_collectionCollection of Mappings to be used.
    fe_collectionCollection of FiniteElements to be used.
    q_collectionCollection of Quadrature rules over $[0, 1]^{dim-1}$ that should be used when a face is not intersected and we do not need to generate immersed quadrature rules.
    q_collectionCollection of Quadrature rules over $[0, 1]^{dim-1}$ that should be used when a face is not intersected and we do not need to generate immersed quadrature rules.
    q_collection_1dCollection of 1-dimensional quadrature rules used to generate the immersed quadrature rules. See the QuadratureGenerator class.
    mesh_classifierObject used to determine when the immersed quadrature rules need to be generated.
    region_update_flagsStruct storing UpdateFlags for the inside/outside region of the cell.
    -

    Return an FEInterfaceValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

    +

    Return an FEInterfaceValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the outside domain, the returned optional will not contain a value.

    Definition at line 461 of file fe_values.cc.

    @@ -477,7 +477,7 @@
    -

    Return an FEInterfaceValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

    +

    Return an FEInterfaceValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the inside domain, the returned optional will not contain a value.

    Definition at line 473 of file fe_values.cc.

    @@ -508,7 +508,7 @@
    -

    Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim-1}$. These will be used on the non-intersected cells.

    +

    Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim-1}$. These will be used on the non-intersected cells.

    Definition at line 332 of file fe_values.cc.

    @@ -762,7 +762,7 @@
    -

    For each element in the FECollection passed to the constructor, this object contains an FEInterfaceValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim-1}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

    +

    For each element in the FECollection passed to the constructor, this object contains an FEInterfaceValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim-1}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

    When LocationToLevelSet of the cell is INSIDE (and we do not need to generate an immersed quadrature), we return the FEInterfaceValues object in this container corresponding to the cell's active_fe_index.

    This container is a std::deque, which is compatible with the FEInterfaceValues class that does not have a copy-constructor.

    @@ -791,7 +791,7 @@
    -

    For each element in the FECollection passed to the constructor, this object contains an FEInterfaceValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim-1}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

    +

    For each element in the FECollection passed to the constructor, this object contains an FEInterfaceValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim-1}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

    When LocationToLevelSet of the cell is OUTSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

    This container is a std::deque, which is compatible with the FEInterfaceValues class that does not have a copy-constructor.

    @@ -820,7 +820,7 @@
    -

    FEInterfaceValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEInterfaceValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 668 of file fe_values.h.

    @@ -847,7 +847,7 @@
    -

    FEInterfaceValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEInterfaceValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 677 of file fe_values.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 2024-03-17 21:57:33.331171843 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 2024-03-17 21:57:33.331171843 +0000 @@ -159,17 +159,17 @@

    Detailed Description

    template<int dim>
    -class NonMatching::FEValues< dim >

    This class is intended to facilitate assembling in immersed (in the sense of cut) finite element methods when the domain is described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$. In this type of method, we typically need to integrate over 3 different regions of each cell, $K$:

    -\[
+class NonMatching::FEValues< dim ></div><p>This class is intended to facilitate assembling in immersed (in the sense of cut) finite element methods when the domain is described by a level set function, <picture><source srcset=$\psi : \mathbb{R}^{dim} \to \mathbb{R}$. In this type of method, we typically need to integrate over 3 different regions of each cell, $K$:

    +\[
 N = \{x \in K : \psi(x) < 0 \}, \\
 P = \{x \in K : \psi(x) > 0 \}, \\
 S = \{x \in K : \psi(x) = 0 \}.
-\] +\]" src="form_2026.png"/>

    Thus we need quadrature rules for these 3 regions:

    -

    As in the QuadratureGenerator class, we refer to $N$, $P$, and $S$ as the inside, outside, and surface regions. The constructor of this class takes a discrete level set function described by a DoFHandler and a Vector. When the reinit() function is called, the QuadratureGenerator will be called in the background to create these immersed quadrature rules. This class then creates FEValues objects for the inside/outside regions and an FEImmersedSurfaceValues object for the surface region. These objects can then be accessed through one of the functions: get_inside_fe_values(), get_outside_fe_values(), or get_surface_fe_values(). Since a cut between a cell and the domain can be arbitrarily small, the underlying algorithm may generate a quadrature rule with 0 points. This can, for example, happen if the relative size of the cut is similar to the floating-point accuracy. Since the FEValues-like objects are not allowed to contain 0 points, the object that get_inside/outside/surface_fe_values() returns is wrapped in a std_cxx17::optional. This requires us to check if the returned FEValues-like object contains a value before we use it:

    +

    As in the QuadratureGenerator class, we refer to $N$, $P$, and $S$ as the inside, outside, and surface regions. The constructor of this class takes a discrete level set function described by a DoFHandler and a Vector. When the reinit() function is called, the QuadratureGenerator will be called in the background to create these immersed quadrature rules. This class then creates FEValues objects for the inside/outside regions and an FEImmersedSurfaceValues object for the surface region. These objects can then be accessed through one of the functions: get_inside_fe_values(), get_outside_fe_values(), or get_surface_fe_values(). Since a cut between a cell and the domain can be arbitrarily small, the underlying algorithm may generate a quadrature rule with 0 points. This can, for example, happen if the relative size of the cut is similar to the floating-point accuracy. Since the FEValues-like objects are not allowed to contain 0 points, the object that get_inside/outside/surface_fe_values() returns is wrapped in a std_cxx17::optional. This requires us to check if the returned FEValues-like object contains a value before we use it:

    for (const auto &cell : dof_handler.active_cell_iterators())
    {
    @@ -190,7 +190,7 @@
    }
    std_cxx17::optional<::FEValues< dim > > fe_values_inside
    Definition fe_values.h:345
    -

    Of course, it is somewhat expensive to generate the immersed quadrature rules and create FEValues objects with the generated quadratures. To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. It only generates the immersed quadrature rules if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEValues object created with a quadrature over the reference cell: $[0, 1]^{dim}$.

    +

    Of course, it is somewhat expensive to generate the immersed quadrature rules and create FEValues objects with the generated quadratures. To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. It only generates the immersed quadrature rules if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEValues object created with a quadrature over the reference cell: $[0, 1]^{dim}$.

    Definition at line 144 of file fe_values.h.

    Member Typedef Documentation

    @@ -341,7 +341,7 @@ - + @@ -398,7 +398,7 @@
    mapping_collectionCollection of Mappings to be used.
    fe_collectionCollection of FiniteElements to be used.
    q_collectionCollection of Quadrature rules over $[0, 1]^{dim}$ that should be used when a cell is not intersected and we do not need to generate immersed quadrature rules.
    q_collectionCollection of Quadrature rules over $[0, 1]^{dim}$ that should be used when a cell is not intersected and we do not need to generate immersed quadrature rules.
    q_collection_1dCollection of 1-dimensional quadrature rules used to generate the immersed quadrature rules. See the QuadratureGenerator class.
    mesh_classifierObject used to determine when the immersed quadrature rules need to be generated.
    region_update_flagsStruct storing UpdateFlags for the inside/outside/surface region of the cell.
    -

    Return an FEValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

    +

    Return an FEValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the outside domain, the returned optional will not contain a value.

    Definition at line 241 of file fe_values.cc.

    @@ -421,7 +421,7 @@
    -

    Return an FEValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

    +

    Return an FEValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the inside domain, the returned optional will not contain a value.

    Definition at line 253 of file fe_values.cc.

    @@ -444,7 +444,7 @@
    -

    Return an FEValues object reinitialized with a quadrature for the surface region of the cell: $\{x \in K : \psi(x) = 0 \}$.

    +

    Return an FEValues object reinitialized with a quadrature for the surface region of the cell: $\{x \in K : \psi(x) = 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is not intersected, the returned optional will not contain a value.

    Definition at line 265 of file fe_values.cc.

    @@ -475,7 +475,7 @@
    -

    Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim}$. These will be used on the non-intersected cells.

    +

    Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim}$. These will be used on the non-intersected cells.

    Definition at line 105 of file fe_values.cc.

    @@ -692,7 +692,7 @@
    -

    For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

    +

    For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

    When LocationToLevelSet of the cell is INSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

    This container is a std::deque, which is compatible with the FEValues class that does not have a copy-constructor.

    @@ -721,7 +721,7 @@
    -

    For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

    +

    For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

    When LocationToLevelSet of the cell is OUTSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

    This container is a std::deque, which is compatible with the FEValues class that does not have a copy-constructor.

    @@ -750,7 +750,7 @@
    -

    FEValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 345 of file fe_values.h.

    @@ -777,7 +777,7 @@
    -

    FEValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 354 of file fe_values.h.

    @@ -804,7 +804,7 @@
    -

    FEImmersedSurfaceValues object created with a quadrature rule integrating over the surface region, $\{x \in B : \psi(x) = 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEImmersedSurfaceValues object created with a quadrature rule integrating over the surface region, $\{x \in B : \psi(x) = 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 364 of file fe_values.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 2024-03-17 21:57:33.359172015 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 2024-03-17 21:57:33.367172064 +0000 @@ -133,16 +133,16 @@

    Detailed Description

    template<int dim>
    -class NonMatching::FaceQuadratureGenerator< dim >

    This class creates immersed quadrature rules over a face, $F$, of a BoundingBox, when the domain is described by a level set function, $\psi$.

    -

    In the same way as in the QuadratureGenerator class, this class generates quadrature rules to integrate over 3 different regions of the face, $F \subset \mathbb{R}^{dim}$:

    -\[
+class NonMatching::FaceQuadratureGenerator< dim ></div><p>This class creates immersed quadrature rules over a face, <picture><source srcset=$F$, of a BoundingBox, when the domain is described by a level set function, $\psi$.

    +

    In the same way as in the QuadratureGenerator class, this class generates quadrature rules to integrate over 3 different regions of the face, $F \subset \mathbb{R}^{dim}$:

    +\[
 N = \{x \in F : \psi(x) < 0 \}, \\
 P = \{x \in F : \psi(x) > 0 \}, \\
 S = \{x \in F : \psi(x) = 0 \},
-\] +\]" src="form_2069.png"/>

    -

    which are again referred to as the "inside", $N$, "outside", $P$, and "surface" region, $S$. These type of quadrature rules are in general needed in immersed discontinuous Galerkin methods.

    -

    Under the hood, this class uses the QuadratureGenerator class to build these rules. This is done by restricting the dim-dimensional level set function to the face, thus creating a (dim-1)-dimensional level set function, $\phi$. It then creates the (dim-1)-dimensional quadratures by calling QuadratureGenerator with $\phi$. This means that what holds for the QuadratureGenerator class in general also holds for this class. In particular, if the 1d-quadrature that is used as base contains $n$ points, the number of points will be proportional to $n^{dim-1}$ in the in the inside/outside quadratures and to $n^{dim-2}$ in the surface quadrature.

    +

    which are again referred to as the "inside", $N$, "outside", $P$, and "surface" region, $S$. These type of quadrature rules are in general needed in immersed discontinuous Galerkin methods.

    +

    Under the hood, this class uses the QuadratureGenerator class to build these rules. This is done by restricting the dim-dimensional level set function to the face, thus creating a (dim-1)-dimensional level set function, $\phi$. It then creates the (dim-1)-dimensional quadratures by calling QuadratureGenerator with $\phi$. This means that what holds for the QuadratureGenerator class in general also holds for this class. In particular, if the 1d-quadrature that is used as base contains $n$ points, the number of points will be proportional to $n^{dim-1}$ in the in the inside/outside quadratures and to $n^{dim-2}$ in the surface quadrature.

    Definition at line 292 of file quadrature_generator.h.

    Member Typedef Documentation

    @@ -239,7 +239,7 @@
    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 1774 of file quadrature_generator.cc.

    @@ -261,7 +261,7 @@
    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 1782 of file quadrature_generator.cc.

    @@ -283,8 +283,8 @@
    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    -
    Note
    The normal at the quadrature points will be parallel to $\nabla \psi$.
    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    +
    Note
    The normal at the quadrature points will be parallel to $\nabla \psi$.

    Definition at line 1791 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 2024-03-17 21:57:33.391172213 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 2024-03-17 21:57:33.395172237 +0000 @@ -220,7 +220,7 @@

    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 1853 of file quadrature_generator.cc.

    @@ -242,7 +242,7 @@

    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 1860 of file quadrature_generator.cc.

    @@ -262,7 +262,7 @@
    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ where, $F$ is the face of the BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ where, $F$ is the face of the BoundingBox passed to generate().

    Note
    In 1d, this quadrature always contains 0 points.

    Definition at line 1868 of file quadrature_generator.cc.

    @@ -309,7 +309,7 @@
    -

    Quadrature for the region $\{x \in F : \psi(x) < 0 \}$. Created in the last call to generate().

    +

    Quadrature for the region $\{x \in F : \psi(x) < 0 \}$. Created in the last call to generate().

    Definition at line 455 of file quadrature_generator.h.

    @@ -334,7 +334,7 @@
    -

    Quadrature for the region $\{x \in F : \psi(x) > 0 \}$. Created in the last call to generate().

    +

    Quadrature for the region $\{x \in F : \psi(x) > 0 \}$. Created in the last call to generate().

    Definition at line 462 of file quadrature_generator.h.

    @@ -359,7 +359,7 @@
    -

    Quadrature for the region $\{x \in F : \psi(x) = 0 \}$. This quadrature always contains zero points in 1d.

    +

    Quadrature for the region $\{x \in F : \psi(x) = 0 \}$. This quadrature always contains zero points in 1d.

    Definition at line 469 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 2024-03-17 21:57:33.431172459 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 2024-03-17 21:57:33.439172509 +0000 @@ -207,41 +207,41 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    -class NonMatching::ImmersedSurfaceQuadrature< dim, spacedim >

    This class defines a quadrature formula to integrate over the intersection between an oriented surface, $\hat{S}$, and a cell or face. The word "immersed" in the class name reflects that the surface may intersect the cell/face in an arbitrary way.

    -

    The spacedim template parameter of this class is the dimension that the (spacedim-1)-dimensional surface is embedded in: $\hat{S} \subset \mathbb{R}^{\text{spacedim}}$. The dim parameter describes the dimension of the "object" that the surface intersects. That is, dim = spacedim corresponds to the surface intersecting a cell and dim = spacedim - 1 corresponds to the surface intersecting a face. The quadrature formula is described by a set of quadrature points, $\hat{x}_q \in \mathbb{R}^{\text{dim}}$, weights, $w_q$, and normalized surface normals, $\hat{n}_q \in \mathbb{R}^{\text{spacedim}}$.

    -

    Consider first the case dim = spacedim. We typically want to compute integrals in real space. A surface, $S$, intersecting a cell, $K$, in real space can be mapped onto a surface, $\hat{S}$, intersecting the unit cell, $\hat{K}$. Thus an integral over $S\cap K$ in real space can be transformed to an integral over $\hat{S} \cap \hat{K}$ according to

    -\[
+class NonMatching::ImmersedSurfaceQuadrature< dim, spacedim ></div><p>This class defines a quadrature formula to integrate over the intersection between an oriented surface, <picture><source srcset=$\hat{S}$, and a cell or face. The word "immersed" in the class name reflects that the surface may intersect the cell/face in an arbitrary way.

    +

    The spacedim template parameter of this class is the dimension that the (spacedim-1)-dimensional surface is embedded in: $\hat{S} \subset \mathbb{R}^{\text{spacedim}}$. The dim parameter describes the dimension of the "object" that the surface intersects. That is, dim = spacedim corresponds to the surface intersecting a cell and dim = spacedim - 1 corresponds to the surface intersecting a face. The quadrature formula is described by a set of quadrature points, $\hat{x}_q \in \mathbb{R}^{\text{dim}}$, weights, $w_q$, and normalized surface normals, $\hat{n}_q \in \mathbb{R}^{\text{spacedim}}$.

    +

    Consider first the case dim = spacedim. We typically want to compute integrals in real space. A surface, $S$, intersecting a cell, $K$, in real space can be mapped onto a surface, $\hat{S}$, intersecting the unit cell, $\hat{K}$. Thus an integral over $S\cap K$ in real space can be transformed to an integral over $\hat{S} \cap \hat{K}$ according to

    +\[
 \int_{S\cap K} f dS =
 \int_{S\cap K} f |d\bar{S}| =
 \int_{\hat{S}\cap\hat{K}} f \circ F_{K} \det(J) |\left( J^{-1} \right
 )^T d\hat{S}|,
-\] +\]" src="form_2043.png"/>

    -

    where $F_K$ is the mapping from reference to real space and $J$ is its Jacobian matrix. This transformation is possible since the continuous surface elements are vectors: $d\bar{S}, d\hat{S} \in \mathbb{R}^{spacedim}$, which are parallel to the normals of $S$ and $\hat{S}$. That is, the normal is needed to do the transformation. Thus, in addition to storing points and weights, this quadrature stores also the normalized normal for each quadrature point. This can be viewed as storing a discrete surface element,

    -\[
+<p> where <picture><source srcset=$F_K$ is the mapping from reference to real space and $J$ is its Jacobian matrix. This transformation is possible since the continuous surface elements are vectors: $d\bar{S}, d\hat{S} \in \mathbb{R}^{spacedim}$, which are parallel to the normals of $S$ and $\hat{S}$. That is, the normal is needed to do the transformation. Thus, in addition to storing points and weights, this quadrature stores also the normalized normal for each quadrature point. This can be viewed as storing a discrete surface element,

    +\[
 \Delta \hat{S}_q \dealcoloneq w_q \hat{n}_q \approx d\hat{S}(\hat{x}_q),
-\] +\]" src="form_2046.png"/>

    for each quadrature point. The surface integral in real space would then be approximated as

    -\[
+<picture><source srcset=\[
 \int_{S\cap K} f dS \approx
 \sum_{q} f \left(F_{K}(\hat{x}_{q}) \right) \det(J_q)
 |\left( J_q^{-1} \right)^T \hat{n}_q| w_q.
-\] +\]" src="form_2047.png"/>

    -

    When dim = spacedim - 1, this class represents a (spacedim-2)-dimensional integral. That is, if spacedim = 3 we have a line integral immersed in a face. Let $\hat{r}(t)$, $t \in [0,T]$ be an arc-length parameterizations of $\hat{F}\cap \hat{S}$, i.e., the part of the surface that intersects the face in reference space. This means that $\bar{r}(t) = F_K(\hat{r}(t))$ is a parameterization of $S\cap F$. The transformation of the line integral now reads

    -\[
+<p>When dim = spacedim - 1, this class represents a (spacedim-2)-dimensional integral. That is, if spacedim = 3 we have a line integral immersed in a face. Let <picture><source srcset=$\hat{r}(t)$, $t \in [0,T]$ be an arc-length parameterizations of $\hat{F}\cap \hat{S}$, i.e., the part of the surface that intersects the face in reference space. This means that $\bar{r}(t) = F_K(\hat{r}(t))$ is a parameterization of $S\cap F$. The transformation of the line integral now reads

    +\[
 \int_{S\cap F} f dr
 = \int_{0}^T f(\bar{r}(t)) \left \|\frac{d\bar{r}}{dt} \right \| dt
 = \int_{0}^T f(F_K(\hat{r}(t))) \left \| J \frac{d\hat{r}}{dt} \right \| dt
 \approx \sum_{q} f \left(F_{K}(\hat{x}_{q}) \right) \|J(\hat{x}_q)
 \hat{t}_q \| w_q,
-\] +\]" src="form_2053.png"/>

    -

    where $\hat{t}_q = \frac{d\hat{r}}{dt}(x_q) $ is the tangent to the curve at $\hat{x}_q$. This tangent can also be computed as $t_q = \hat{n}_q \times \hat{n}_F / \| \hat{n}_q \times \hat{n}_F \|$ where $\hat{n}_F$ is the face normal. It would be possible to compute the tangent by only knowing the normal to the curve in the face plane (i.e. the dim-dimensional normal). However, when these quadratures are used, the weak form typically involves the so-called conormal, which can not be computed without knowing the surface normal in $\mathbb{R}^{\text{spacedim}}$. The conormal is the unit vector parallel to the projection of the face normal into the surface plane. This is the same as the normalized boundary form.

    +

    where $\hat{t}_q = \frac{d\hat{r}}{dt}(x_q) $ is the tangent to the curve at $\hat{x}_q$. This tangent can also be computed as $t_q = \hat{n}_q \times \hat{n}_F / \| \hat{n}_q \times \hat{n}_F \|$ where $\hat{n}_F$ is the face normal. It would be possible to compute the tangent by only knowing the normal to the curve in the face plane (i.e. the dim-dimensional normal). However, when these quadratures are used, the weak form typically involves the so-called conormal, which can not be computed without knowing the surface normal in $\mathbb{R}^{\text{spacedim}}$. The conormal is the unit vector parallel to the projection of the face normal into the surface plane. This is the same as the normalized boundary form.

    Definition at line 107 of file immersed_surface_quadrature.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 2024-03-17 21:57:33.467172682 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 2024-03-17 21:57:33.471172707 +0000 @@ -131,24 +131,24 @@

    Detailed Description

    template<int dim>
    -class NonMatching::QuadratureGenerator< dim >

    This class creates immersed quadrature rules over a BoundingBox, $B \subset \mathbb{R}^{dim}$, when the domain is described by a level set function, $\psi$.

    +class NonMatching::QuadratureGenerator< dim >

    This class creates immersed quadrature rules over a BoundingBox, $B \subset \mathbb{R}^{dim}$, when the domain is described by a level set function, $\psi$.

    This class creates quadrature rules for the intersections between the box and the three different regions defined by the level set function. That is, it creates quadrature rules to integrate over the following regions

    -\[
+<picture><source srcset=\[
 N = \{x \in B : \psi(x) < 0 \}, \\
 P = \{x \in B : \psi(x) > 0 \}, \\
 S = \{x \in B : \psi(x) = 0 \}.
-\] +\]" src="form_2063.png"/>

    -

    When working with level set functions, the most common is to describe a domain, $\Omega$, as

    -\[
+<p>When working with level set functions, the most common is to describe a domain, <picture><source srcset=$\Omega$, as

    +\[
 \Omega = \{ x \in \mathbb{R}^{dim} : \psi(x) < 0 \}.
-\] +\]" src="form_2064.png"/>

    -

    Given this, we shall use the name convention that $N$ is the "inside" region (i.e. inside $\Omega$), $P$ is the "outside" region and $S$ is the "surface" region. The "inside" and "outside" quadratures will also be referred to as the "bulk"-quadratures.

    -

    The underlying algorithm use a 1-dimensional quadrature rule as base for creating the immersed quadrature rules. Gauss-Legendre quadrature (QGauss) is recommended. The constructor takes an hp::QCollection<1>. One can select which 1d-quadrature in the collection should be used through the set_1d_quadrature() function. The number of quadrature points in the constructed quadratures will vary depending on the level set function. More quadrature points will be created if the intersection is "bad", for example, if the zero-contour has a high curvature compared to the size of the box. However, if the number of points in the 1d quadrature is $n$ the number of points will be proportional to $n^{dim}$ in the bulk quadratures and to $n^{dim-1}$ in the surface quadrature. For example, in the 2d-example in the above figure, there are 2 points in the 1d-quadrature. If the 1d-quadrature is a Gauss-Legendre quadrature and the grid has size $h$, the immersed quadratures typically give global errors proportional to $h^{2n}$, both for the bulk and surface integrals. If the 1d-quadrature has positive weights, the weights of the immersed quadratures will also be positive.

    +

    Given this, we shall use the name convention that $N$ is the "inside" region (i.e. inside $\Omega$), $P$ is the "outside" region and $S$ is the "surface" region. The "inside" and "outside" quadratures will also be referred to as the "bulk"-quadratures.

    +

    The underlying algorithm use a 1-dimensional quadrature rule as base for creating the immersed quadrature rules. Gauss-Legendre quadrature (QGauss) is recommended. The constructor takes an hp::QCollection<1>. One can select which 1d-quadrature in the collection should be used through the set_1d_quadrature() function. The number of quadrature points in the constructed quadratures will vary depending on the level set function. More quadrature points will be created if the intersection is "bad", for example, if the zero-contour has a high curvature compared to the size of the box. However, if the number of points in the 1d quadrature is $n$ the number of points will be proportional to $n^{dim}$ in the bulk quadratures and to $n^{dim-1}$ in the surface quadrature. For example, in the 2d-example in the above figure, there are 2 points in the 1d-quadrature. If the 1d-quadrature is a Gauss-Legendre quadrature and the grid has size $h$, the immersed quadratures typically give global errors proportional to $h^{2n}$, both for the bulk and surface integrals. If the 1d-quadrature has positive weights, the weights of the immersed quadratures will also be positive.

    A detailed description of the underlying algorithm can be found in "High-Order %Quadrature Methods for Implicitly Defined Surfaces and Volumes in Hyperrectangles, R. I. Saye, SIAM J. Sci. Comput., 37(2), <a href="http://www.dx.doi.org/10.1137/140966290"> @@ -244,7 +244,7 @@

    -

    Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

    Definition at line 1668 of file quadrature_generator.cc.

    @@ -266,7 +266,7 @@
    -

    Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

    Definition at line 1677 of file quadrature_generator.cc.

    @@ -288,8 +288,8 @@
    -

    Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

    -
    Note
    The normal at the quadrature points will be parallel to $\nabla \psi$.
    +

    Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

    +
    Note
    The normal at the quadrature points will be parallel to $\nabla \psi$.

    Definition at line 1686 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 2024-03-17 21:57:33.523173028 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 2024-03-17 21:57:33.527173053 +0000 @@ -233,7 +233,7 @@

    where $f_j$ are the local solution values and $\hat{\phi}_j(\hat(x))$ are the local reference space shape functions. The gradient and Hessian of this function are thus derivatives with respect to the reference space coordinates, $\hat{x}_0, \hat{x}_1, \ldots$.

    Note that this class is similar to FEFieldFunction, but that FEFieldFunction implements the following function on a given cell, $K$,

    $f(x) = \sum_{j=0}^{n-1} f_j \hat{\phi}_j(F_K^{-1}(x))$,

    -

    which has the same coefficients but uses real space basis functions. Here, $F_K$ is the mapping from the reference cell to the real cell.

    +

    which has the same coefficients but uses real space basis functions. Here, $F_K$ is the mapping from the reference cell to the real cell.

    Before calling the value/gradient/hessian function, the set_active_cell function must be called to specify which cell the function should be evaluated on.

    Definition at line 1312 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 2024-03-17 21:57:33.563173275 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 2024-03-17 21:57:33.567173300 +0000 @@ -150,20 +150,20 @@

    Detailed Description

    template<int dim, int spacedim>
    class NonMatching::internal::QuadratureGeneratorImplementation::QGenerator< dim, spacedim >

    This class implements the Saye-algorithm cited in the documentation of the QuadratureGenerator class.

    -

    The generate function takes a number of $dim$-dimensional level set functions, $\psi_i$, and a BoundingBox<dim>, and builds a partitioning of quadratures, as defined in documentation of the QPartitioning class. That is, this class builds an object of type QPartitioning<dim>.

    -

    If all $\psi_i$ passed to generate can be determined to be positive or negative definite, the QPartitioning will consist of a single quadrature forming a tensor product.

    -

    If this is not the case, the algorithm uses recursion over the spatial dimension. The spacedim template parameter denotes the dimension we started with and dim denotes on what level we are in the recursion. That is, we first construct a QPartitioning<dim - 1> and then build the higher dimensional quadratures from these. What we in the end actually want is a spacedim-dimensional partitioning of quadratures, for a single level set function, $\psi$.

    -

    The algorithm is based on the implicit function theorem. Starting with a single level set function, $\psi$, we try to find a direction $i$, such that

    -

    $|\frac{\partial \psi}{\partial x_i}| > 0$.

    +

    The generate function takes a number of $dim$-dimensional level set functions, $\psi_i$, and a BoundingBox<dim>, and builds a partitioning of quadratures, as defined in documentation of the QPartitioning class. That is, this class builds an object of type QPartitioning<dim>.

    +

    If all $\psi_i$ passed to generate can be determined to be positive or negative definite, the QPartitioning will consist of a single quadrature forming a tensor product.

    +

    If this is not the case, the algorithm uses recursion over the spatial dimension. The spacedim template parameter denotes the dimension we started with and dim denotes on what level we are in the recursion. That is, we first construct a QPartitioning<dim - 1> and then build the higher dimensional quadratures from these. What we in the end actually want is a spacedim-dimensional partitioning of quadratures, for a single level set function, $\psi$.

    +

    The algorithm is based on the implicit function theorem. Starting with a single level set function, $\psi$, we try to find a direction $i$, such that

    +

    $|\frac{\partial \psi}{\partial x_i}| > 0$.

    throughout the whole box. This means that the zero-contour of the level set function can be parameterized by an implicit function

    -

    $H = H(x_0, ..., x_{i-1}, x_{i+1}, ..., x_{dim-1})$,

    +

    $H = H(x_0, ..., x_{i-1}, x_{i+1}, ..., x_{dim-1})$,

    so that

    -

    $\psi(..., x_{i-1}, H(..., x_{i-1}, x_{i+1}, ...), x_{i+1}, ...) = 0$,

    -

    over a subset, $I \subset C \subset \mathbb{R}^{dim-1}$, of the cross section, $C$, of the box (see BoundingBox::cross_section). Here, $I$ is the "indefinite"-region defined in the QPartitioning class. To follow convention in the original paper, we will -refer to $H$ as the "height-function" and to $i$ as the "height-function direction".

    -

    If a height function direction can be found, we go down in dimension by creating two new level set functions, $\{\psi_0, \psi_1\}$, which are the restriction of $\psi$ to the top and bottom faces of the box (in the height function direction). We then delegate to QGenerator<dim-1, spacedim> to create a QPartitioning<dim-1> over the cross section.

    +

    $\psi(..., x_{i-1}, H(..., x_{i-1}, x_{i+1}, ...), x_{i+1}, ...) = 0$,

    +

    over a subset, $I \subset C \subset \mathbb{R}^{dim-1}$, of the cross section, $C$, of the box (see BoundingBox::cross_section). Here, $I$ is the "indefinite"-region defined in the QPartitioning class. To follow convention in the original paper, we will -refer to $H$ as the "height-function" and to $i$ as the "height-function direction".

    +

    If a height function direction can be found, we go down in dimension by creating two new level set functions, $\{\psi_0, \psi_1\}$, which are the restriction of $\psi$ to the top and bottom faces of the box (in the height function direction). We then delegate to QGenerator<dim-1, spacedim> to create a QPartitioning<dim-1> over the cross section.

    When we reach the base case, $dim = 1$, the creation of QPartitioning<1> is simple. See the documentation in specialized class: QGenerator<1, spacedim>.

    As we go up through the dimensions and create the higher dimensional quadratures, we need to know the function value of the height functions at the lower dimensional quadrature points. Since the functions are implicit, we need to do root-finding on the level set functions to find the function values. For this we use the class UpThroughDimensionCreator, see documentation there.

    -

    When we have $n$ level set functions (i.e. after having gone down in dimension), we try to find a height function direction, which works for all those $\psi_i$ which are intersected by the zero contour (i.e. those not positive or negative definite). If such a direction exist, we will have a maximum of $n$ associated implicit height functions, $H_j$. Each $H_j$ parametrize the $x_i$-coordinate of the zero-contour over a region, $I_j$. The indefinite region in the lower dimensional partitioning is the union of these $I = \cup_j I_j$.

    +

    When we have $n$ level set functions (i.e. after having gone down in dimension), we try to find a height function direction, which works for all those $\psi_i$ which are intersected by the zero contour (i.e. those not positive or negative definite). If such a direction exist, we will have a maximum of $n$ associated implicit height functions, $H_j$. Each $H_j$ parametrize the $x_i$-coordinate of the zero-contour over a region, $I_j$. The indefinite region in the lower dimensional partitioning is the union of these $I = \cup_j I_j$.

    As we try to find a height function direction, we estimate bounds on the gradient components by approximating each component as a 1st-order Taylor-polynomial. If a direction can not be found, the box is split and we recurse on each smaller box. This makes an implicit function more likely to exist since we seek it over a smaller portion of the zero contour. It also makes the estimated bounds tighter since we extrapolate the Taylor-polynomial a shorter distance.

    Since we can not split a box forever, there is an maximum number of allowed splits on the additional data struct passed to the constructor. If this is reached, the algorithm uses the midpoint method as a last resort.

    @@ -313,7 +313,7 @@
    -

    Gets the $(dim - 1)$-dimensional quadratures from the lower dimensional algorithm and creates the $dim$-dimensional quadrature rules over the box from the lower dimensional ones.

    +

    Gets the $(dim - 1)$-dimensional quadratures from the lower dimensional algorithm and creates the $dim$-dimensional quadrature rules over the box from the lower dimensional ones.

    Definition at line 1118 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 2024-03-17 21:57:33.599173498 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 2024-03-17 21:57:33.599173498 +0000 @@ -151,8 +151,8 @@

    Detailed Description

    template<int spacedim>
    class NonMatching::internal::QuadratureGeneratorImplementation::QGenerator< 1, spacedim >

    The 1d-base case of the recursive algorithm QGenerator<dim, spacedim>.

    -

    Let $L$ and $R$ be the left and right bounds of the one-dimensional BoundingBox. This interval is partitioned into $[x_0, x_1, ..., x_n]$ where $x_0 = L$, $x_n = R$, and the remaining $x_i$ are the roots of the level set functions in the interval $[L, R]$. In each interval, $[x_i, x_{i+1}]$, quadrature points are distributed according to a 1d-quadrature rule. These points are added to one of the regions of QPartitioning determined from the signs of the level set functions on the interval (see documentation of QPartitioning).

    -

    If spacedim = 1 the points $[x_1, x_{n-1}]$ are also added as surface quadrature points to QPartitioning::surface.

    +

    Let $L$ and $R$ be the left and right bounds of the one-dimensional BoundingBox. This interval is partitioned into $[x_0, x_1, ..., x_n]$ where $x_0 = L$, $x_n = R$, and the remaining $x_i$ are the roots of the level set functions in the interval $[L, R]$. In each interval, $[x_i, x_{i+1}]$, quadrature points are distributed according to a 1d-quadrature rule. These points are added to one of the regions of QPartitioning determined from the signs of the level set functions on the interval (see documentation of QPartitioning).

    +

    If spacedim = 1 the points $[x_1, x_{n-1}]$ are also added as surface quadrature points to QPartitioning::surface.

    Definition at line 1208 of file quadrature_generator.h.

    Constructor & Destructor Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 2024-03-17 21:57:33.619173621 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 2024-03-17 21:57:33.619173621 +0000 @@ -120,18 +120,18 @@

    Detailed Description

    template<int dim>
    class NonMatching::internal::QuadratureGeneratorImplementation::QPartitioning< dim >

    Class that stores quadrature rules to integrate over 4 different regions of a single BoundingBox, $B$. Given multiple level set functions,

    -

    $\psi_i : \mathbb{R}^{dim} \rightarrow \mathbb{R}$, $i = 0, 1, ...$,

    -

    the box, $B \subset \mathbb{R}^{dim}$, is partitioned into a "negative", "positive", and "indefinite" region, $B = N \cup P \cup I$, according to the signs of $\psi_i$ over each region:

    +

    $\psi_i : \mathbb{R}^{dim} \rightarrow \mathbb{R}$, $i = 0, 1, ...$,

    +

    the box, $B \subset \mathbb{R}^{dim}$, is partitioned into a "negative", "positive", and "indefinite" region, $B = N \cup P \cup I$, according to the signs of $\psi_i$ over each region:

    -\[
+<picture><source srcset=\[
 N = \{x \in B : \psi_i(x) < 0, \forall i \}, \\
 P = \{x \in B : \psi_i(x) > 0, \forall i \}, \\
 I = B \setminus (\overline{N} \cup \overline{P}).
-\] +\]" src="form_2088.png"/>

    -

    Thus, all $\psi_i$ are positive over $P$ and negative over $N$. Over $I$ the level set functions differ in sign. This class holds quadrature rules for each of these regions. In addition, when there is a single level set function, $\psi$, it holds a surface quadrature for the zero contour of $\psi$:

    -

    $S = \{x \in B : \psi(x) = 0 \}$.

    -

    Note that when there is a single level set function, $I$ is empty and $N$ and $P$ are the regions that one typically integrates over in an immersed finite element method.

    +

    Thus, all $\psi_i$ are positive over $P$ and negative over $N$. Over $I$ the level set functions differ in sign. This class holds quadrature rules for each of these regions. In addition, when there is a single level set function, $\psi$, it holds a surface quadrature for the zero contour of $\psi$:

    +

    $S = \{x \in B : \psi(x) = 0 \}$.

    +

    Note that when there is a single level set function, $I$ is empty and $N$ and $P$ are the regions that one typically integrates over in an immersed finite element method.

    Definition at line 753 of file quadrature_generator.h.

    Member Function Documentation

    @@ -171,7 +171,7 @@
    -

    Quadrature for the region $\{x \in B : \psi_i(x) < 0 \forall i \}$ of the box, $B$.

    +

    Quadrature for the region $\{x \in B : \psi_i(x) < 0 \forall i \}$ of the box, $B$.

    Definition at line 767 of file quadrature_generator.h.

    @@ -190,7 +190,7 @@
    -

    Quadrature for the region $\{x \in B : \psi_i(x) > 0 \forall i \}$ of the box, $B$.

    +

    Quadrature for the region $\{x \in B : \psi_i(x) > 0 \forall i \}$ of the box, $B$.

    Definition at line 773 of file quadrature_generator.h.

    @@ -228,7 +228,7 @@
    -

    Quadrature for the region $\{x \in B : \psi(x) = 0 \}$ of the box, $B$.

    +

    Quadrature for the region $\{x \in B : \psi(x) = 0 \}$ of the box, $B$.

    Definition at line 785 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 2024-03-17 21:57:33.635173719 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 2024-03-17 21:57:33.643173769 +0000 @@ -122,7 +122,7 @@  

    Detailed Description

    -

    A class that attempts to find multiple distinct roots of a function, $f(x)$, over an interval, $[l, r]$. This is done as follows. If there is a sign change in function value between the interval end points, we solve for the root. If there is no sign change, we attempt to bound the function value away from zero on $[a, b]$, to conclude that no roots exist. If we can't exclude that there are roots, we split the interval in two: $[l, (r + l) / 2]$, $[(r + l) / 2, r]$, and use the same algorithm recursively on each interval. This means that we can typically find 2 distinct roots, but not 3.

    +

    A class that attempts to find multiple distinct roots of a function, $f(x)$, over an interval, $[l, r]$. This is done as follows. If there is a sign change in function value between the interval end points, we solve for the root. If there is no sign change, we attempt to bound the function value away from zero on $[a, b]$, to conclude that no roots exist. If we can't exclude that there are roots, we split the interval in two: $[l, (r + l) / 2]$, $[(r + l) / 2, r]$, and use the same algorithm recursively on each interval. This means that we can typically find 2 distinct roots, but not 3.

    The bounds on the functions values are estimated using the function taylor_estimate_function_bounds, which approximates the function as a second order Taylor-polynomial around the interval midpoint. When we have a sign change on an interval, this class uses boost::math::tools::toms748_solve for finding roots .

    Definition at line 608 of file quadrature_generator.h.

    @@ -168,7 +168,7 @@ std::vector< double > & roots&#href_anchor"memdoc"> -

    For each of the incoming functions, attempt to find the roots over the interval defined by interval and add these to roots. The returned roots will be sorted in ascending order: $x_0 < x_1 <...$ and duplicate roots (with respect to the tolerance in AdditionalData) will be removed.

    +

    For each of the incoming functions, attempt to find the roots over the interval defined by interval and add these to roots. The returned roots will be sorted in ascending order: $x_0 < x_1 <...$ and duplicate roots (with respect to the tolerance in AdditionalData) will be removed.

    Definition at line 532 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 2024-03-17 21:57:33.671173942 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 2024-03-17 21:57:33.675173967 +0000 @@ -131,13 +131,13 @@

    Detailed Description

    template<int dim, int spacedim>
    -class NonMatching::internal::QuadratureGeneratorImplementation::UpThroughDimensionCreator< dim, spacedim >

    This class is responsible for creating quadrature points for the $dim$-dimensional quadrature partitioning from an $(dim - 1)$-dimensional "indefinite" quadrature (see QPartitioning documentation).

    -

    To be precise, let $[L, R]$ be the extents of the box in the height function direction and let $I \subset \mathbb{R}^{dim-1}$ be the lower dimensional indefinite region. This class will create quadrature points over $I \times [L, R] \subset \mathbb{R}^{dim}$ and in the case $dim=spacedim$, points for the surface quadrature.

    -

    For each lower dimensional quadrature point, $(x_I, w_I)$ in the indefinite quadrature, we create several 1d-level set functions by restricting $\psi_j$ to $x_I$. We then partition the interval $[L, R]$ into $[y_0, y_1, ..., y_n]$, where $y_0 = L$, $y_n = R$, and the remaining $y_i$ are the roots of the 1d-level set functions in $[L, R]$. Since the level set functions change sign between the roots, each interval belong to different regions in the quadrature partitioning.

    -

    In each interval, $[y_i, y_{i+1}]$, we distribute points according to the 1d-base quadrature, $(x_q, w_q)$ and take the cartesian product with $(x_I, w_I)$ to create the $dim$-dimensional quadrature points, $(X_q, W_q)$: $X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q)$, $W_q = w_I (y_{i+1} - y_i) w_q$.

    -

    When $dim=spacedim$, we have a single level set function, $\psi$. Since we have fulfilled the implicit function theorem, there is a single root $y_1 \in [L, R]$. The point, $x_s = x_I \times y_1$, will be added as a point in the surface quadrature. One can show that the correct weight of this point is

    -

    $w_s = \frac{\|\nabla \psi(x_s)\|}{|\partial_i \psi(x_s)|} w_I$,

    -

    where $i$ is the height function direction.

    +class NonMatching::internal::QuadratureGeneratorImplementation::UpThroughDimensionCreator< dim, spacedim >

    This class is responsible for creating quadrature points for the $dim$-dimensional quadrature partitioning from an $(dim - 1)$-dimensional "indefinite" quadrature (see QPartitioning documentation).

    +

    To be precise, let $[L, R]$ be the extents of the box in the height function direction and let $I \subset \mathbb{R}^{dim-1}$ be the lower dimensional indefinite region. This class will create quadrature points over $I \times [L, R] \subset \mathbb{R}^{dim}$ and in the case $dim=spacedim$, points for the surface quadrature.

    +

    For each lower dimensional quadrature point, $(x_I, w_I)$ in the indefinite quadrature, we create several 1d-level set functions by restricting $\psi_j$ to $x_I$. We then partition the interval $[L, R]$ into $[y_0, y_1, ..., y_n]$, where $y_0 = L$, $y_n = R$, and the remaining $y_i$ are the roots of the 1d-level set functions in $[L, R]$. Since the level set functions change sign between the roots, each interval belong to different regions in the quadrature partitioning.

    +

    In each interval, $[y_i, y_{i+1}]$, we distribute points according to the 1d-base quadrature, $(x_q, w_q)$ and take the cartesian product with $(x_I, w_I)$ to create the $dim$-dimensional quadrature points, $(X_q, W_q)$: $X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q)$, $W_q = w_I (y_{i+1} - y_i) w_q$.

    +

    When $dim=spacedim$, we have a single level set function, $\psi$. Since we have fulfilled the implicit function theorem, there is a single root $y_1 \in [L, R]$. The point, $x_s = x_I \times y_1$, will be added as a point in the surface quadrature. One can show that the correct weight of this point is

    +

    $w_s = \frac{\|\nabla \psi(x_s)\|}{|\partial_i \psi(x_s)|} w_I$,

    +

    where $i$ is the height function direction.

    Definition at line 828 of file quadrature_generator.h.

    Constructor & Destructor Documentation

    @@ -197,7 +197,7 @@ QPartitioning< dim > & q_partitioning&#href_anchor"memdoc"> -

    Create $dim$-dimensional immersed quadratures from the incoming $(dim-1)$-dimensional quadratures and add these to q_partitioning.

    +

    Create $dim$-dimensional immersed quadratures from the incoming $(dim-1)$-dimensional quadratures and add these to q_partitioning.

    Definition at line 725 of file quadrature_generator.cc.

    @@ -270,7 +270,7 @@

    Create a surface quadrature point from the lower-dimensional point and add it to surface_quadrature.

    -

    This function is only called when $dim=spacedim$ and there is a single level set function. At this point there should only be a single root in the interval $[L, R]$

    +

    This function is only called when $dim=spacedim$ and there is a single level set function. At this point there should only be a single root in the interval $[L, R]$

    Definition at line 781 of file quadrature_generator.cc.

    @@ -379,7 +379,7 @@
    -

    1d-functions, that are restrictions of each dim-dimensional level set function passed to generate() to some $(dim-1)$-dimensional point.

    +

    1d-functions, that are restrictions of each dim-dimensional level set function passed to generate() to some $(dim-1)$-dimensional point.

    Definition at line 898 of file quadrature_generator.h.

    @@ -433,7 +433,7 @@
    -

    The roots of the functions in point_restrictions. This will be the values of the height functions, $\{H_i(x_I)\}$ at some lower dimensional quadrature point, $x_I \in \mathbb{R}^{dim-1}$.

    +

    The roots of the functions in point_restrictions. This will be the values of the height functions, $\{H_i(x_I)\}$ at some lower dimensional quadrature point, $x_I \in \mathbb{R}^{dim-1}$.

    Definition at line 911 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 2024-03-17 21:57:33.711174189 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 2024-03-17 21:57:33.711174189 +0000 @@ -523,10 +523,10 @@

    A function object that users may supply and that is intended to prepare the linear solver for subsequent calls to solve_jacobian_system().

    The job of setup_jacobian() is to prepare the linear solver for subsequent calls to solve_with_jacobian(), in the solution of linear systems $Ax = b$. The exact nature of this system depends on the SolutionStrategy that has been selected.

    In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial
-F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$.

    +F/\partial u$" src="form_2213.png"/>. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$.

    Parameters
    - +
    current_uCurrent value of $u$
    current_uCurrent value of $u$
    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 2024-03-17 21:57:33.739174362 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 2024-03-17 21:57:33.743174387 +0000 @@ -350,7 +350,7 @@
    -

    Relative $l_2$ tolerance of the residual to be reached.

    +

    Relative $l_2$ tolerance of the residual to be reached.

    Note
    Solver terminates successfully if either the function tolerance or the relative tolerance has been reached.

    Definition at line 186 of file nonlinear.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 2024-03-17 21:57:33.803174757 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 2024-03-17 21:57:33.795174708 +0000 @@ -573,7 +573,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -603,24 +603,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1440.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1441.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -629,11 +629,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1442.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 2024-03-17 21:57:33.855175078 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 2024-03-17 21:57:33.863175128 +0000 @@ -500,7 +500,7 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    +

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    Note
    If you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, by overloading the project_to_manifold() function.
    Parameters
    @@ -509,7 +509,7 @@
    -
    Returns
    A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.
    +
    Returns
    A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

    Reimplemented from Manifold< dim, spacedim >.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 2024-03-17 21:57:33.919175474 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 2024-03-17 21:57:33.919175474 +0000 @@ -448,7 +448,7 @@
    -

    Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the uv coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the uv coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function.

    Refer to the general documentation of this class for more information.

    @@ -637,7 +637,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -667,24 +667,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1440.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1441.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -693,11 +693,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1442.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html 2024-03-17 21:57:33.979175844 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html 2024-03-17 21:57:33.987175894 +0000 @@ -494,7 +494,7 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    +

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    Note
    If you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, by overloading the project_to_manifold() function.
    Parameters
    @@ -503,7 +503,7 @@
    -
    Returns
    A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.
    +
    Returns
    A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

    Reimplemented from Manifold< dim, spacedim >.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 2024-03-17 21:57:34.047176265 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 2024-03-17 21:57:34.043176240 +0000 @@ -494,7 +494,7 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    +

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    Note
    If you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, by overloading the project_to_manifold() function.
    Parameters
    @@ -503,7 +503,7 @@
    -
    Returns
    A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.
    +
    Returns
    A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

    Reimplemented from Manifold< dim, spacedim >.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html 2024-03-17 21:57:34.103176610 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html 2024-03-17 21:57:34.103176610 +0000 @@ -273,7 +273,7 @@

    Detailed Description

    template<typename VectorType>
    class PArpackSolver< VectorType >

    Interface for using PARPACK. PARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. Here we interface to the routines pdneupd, pdseupd, pdnaupd, pdsaupd of PARPACK. The package is designed to compute a few eigenvalues and corresponding eigenvectors of a general n by n matrix A. It is most appropriate for large sparse matrices A.

    -

    In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    +

    In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    The ArpackSolver can be used in application codes in the following way:

    const unsigned int num_arnoldi_vectors = 2*size_of_spectrum + 2;
    @@ -298,7 +298,7 @@
    const AdditionalData additional_data
    -

    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells PARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector of objects of type V that will contain the eigenvectors computed.

    +

    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells PARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector of objects of type V that will contain the eigenvectors computed.

    Currently, only three modes of (P)Arpack are implemented. In mode 3 (default), OP is an inverse operation for the matrix A - sigma * B, where sigma is a shift value, set to zero by default. Whereas in mode 2, OP is an inverse of M. Finally, mode 1 corresponds to standard eigenvalue problem without spectral transformation $Ax=\lambda x$. The mode can be specified via AdditionalData object. Note that for shift-and-invert (mode=3), the sought eigenpairs are those after the spectral transformation is applied.

    The OP can be specified by using a LinearOperator:

    const double shift = 5.0;
    const auto op_A = linear_operator<vector_t>(A);
    @@ -631,7 +631,7 @@ const unsigned int n_eigenvalues&#href_anchor"memdoc"> -

    Solve the generalized eigensprectrum problem $A x=\lambda B x$ by calling the pd(n/s)eupd and pd(n/s)aupd functions of PARPACK.

    +

    Solve the generalized eigensprectrum problem $A x=\lambda B x$ by calling the pd(n/s)eupd and pd(n/s)aupd functions of PARPACK.

    In mode=3, inverse should correspond to $[A-\sigma B]^{-1}$, whereas in mode=2 it should represent $B^{-1}$. For mode=1 both B and inverse are ignored.

    Definition at line 770 of file parpack_solver.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 2024-03-17 21:57:34.131176783 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 2024-03-17 21:57:34.131176783 +0000 @@ -291,7 +291,7 @@ const MPI_Comm communicator&#href_anchor"memdoc">

    Reinitialization that takes the number of locally-owned degrees of freedom local_size and an index set for the required ghost indices ghost_indices.

    -

    The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively.

    +

    The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively.

    The export_to_ghost_array will populate an array containing values from locally-owned AND ghost indices, as for the relevant set of dofs of a usual FEM simulation.

    Definition at line 46 of file petsc_communication_pattern.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 2024-03-17 21:57:34.191177154 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 2024-03-17 21:57:34.199177204 +0000 @@ -1505,8 +1505,8 @@
    -

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 417 of file petsc_matrix_base.cc.

    @@ -1534,8 +1534,8 @@
    -

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 430 of file petsc_matrix_base.cc.

    @@ -1591,7 +1591,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -1622,7 +1622,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 2024-03-17 21:57:34.271177648 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 2024-03-17 21:57:34.279177697 +0000 @@ -875,7 +875,7 @@
    -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 410 of file petsc_block_sparse_matrix.h.

    @@ -987,7 +987,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 441 of file petsc_block_sparse_matrix.h.

    @@ -2037,7 +2037,7 @@
    -

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    +

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    @@ -2142,7 +2142,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -2611,7 +2611,7 @@
    -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -2719,7 +2719,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 2024-03-17 21:57:34.347178118 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 2024-03-17 21:57:34.351178142 +0000 @@ -1933,7 +1933,7 @@
    -

    Return the square of the $l_2$-norm.

    +

    Return the square of the $l_2$-norm.

    @@ -1985,7 +1985,7 @@
    -

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    +

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -2011,7 +2011,7 @@
    -

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    +

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -2037,7 +2037,7 @@
    -

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    +

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 2024-03-17 21:57:34.427178612 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 2024-03-17 21:57:34.431178636 +0000 @@ -814,7 +814,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    @@ -837,7 +837,7 @@ const Vector & v&#href_anchor"memdoc"> -

    Compute the matrix scalar product $\left(u^\ast,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u^\ast,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Definition at line 815 of file petsc_parallel_sparse_matrix.cc.

    @@ -2059,8 +2059,8 @@
    -

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 417 of file petsc_matrix_base.cc.

    @@ -2088,8 +2088,8 @@
    -

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 430 of file petsc_matrix_base.cc.

    @@ -2145,7 +2145,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -2176,7 +2176,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 2024-03-17 21:57:34.503179082 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 2024-03-17 21:57:34.511179130 +0000 @@ -1929,7 +1929,7 @@
    -

    Return the square of the $l_2$-norm.

    +

    Return the square of the $l_2$-norm.

    Definition at line 612 of file petsc_vector_base.cc.

    @@ -1985,7 +1985,7 @@
    -

    $l_1$-norm of the vector. The sum of the absolute values.

    +

    $l_1$-norm of the vector. The sum of the absolute values.

    Note
    In complex-valued PETSc priori to 3.7.0 this norm is implemented as the sum of absolute values of real and imaginary parts of elements of a complex vector.

    Definition at line 672 of file petsc_vector_base.cc.

    @@ -2014,7 +2014,7 @@
    -

    $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

    +

    $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

    Definition at line 685 of file petsc_vector_base.cc.

    @@ -2042,7 +2042,7 @@
    -

    $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    +

    $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    Definition at line 698 of file petsc_vector_base.cc.

    @@ -2070,7 +2070,7 @@
    -

    $l_\infty$-norm of the vector. Return the value of the vector element with the maximum absolute value.

    +

    $l_\infty$-norm of the vector. Return the value of the vector element with the maximum absolute value.

    Definition at line 740 of file petsc_vector_base.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 2024-03-17 21:57:34.567179476 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 2024-03-17 21:57:34.571179501 +0000 @@ -1290,8 +1290,8 @@
    -

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 417 of file petsc_matrix_base.cc.

    @@ -1311,8 +1311,8 @@
    -

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 430 of file petsc_matrix_base.cc.

    @@ -1352,7 +1352,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -1376,7 +1376,7 @@ const VectorBase & v&#href_anchor"memdoc"> -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 2024-03-17 21:57:34.643179946 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 2024-03-17 21:57:34.647179971 +0000 @@ -1949,8 +1949,8 @@
    -

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 417 of file petsc_matrix_base.cc.

    @@ -1978,8 +1978,8 @@
    -

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 430 of file petsc_matrix_base.cc.

    @@ -2035,7 +2035,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -2066,7 +2066,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 2024-03-17 21:57:34.683180194 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 2024-03-17 21:57:34.691180242 +0000 @@ -181,7 +181,7 @@
    Mat & petsc_matrix();
    ...

    In particular, the supported types are the ones that can wrap PETSc's native vector and matrix classes, that are able to modify them in place, and that can return PETSc native types when requested.

    -

    To use the solvers the user needs to provide the implementation of $F$ via the NonlinearSolver::residual callback.

    +

    To use the solvers the user needs to provide the implementation of $F$ via the NonlinearSolver::residual callback.

    The default linearization procedure of a solver instantiated with this class consists in using Jacobian-Free-Newton-Krylov; the action of tangent matrices inside a linear solver process are approximated via matrix-free finite-differencing of the nonlinear residual equations. For details, consult the PETSc manual.

    In alternative, users can also provide the implementation of the Jacobian. This can be accomplished in two ways:

    -

    By default, this class assumes that all components are differential, and that you want to solve a standard ode. In this case, the initial component type is set to use_y_diff, so that the y_dot at time t=initial_time is computed by solving the nonlinear problem $F(y_dot,
-y(t0), t0) = 0$ in the variable y_dot.

    +

    By default, this class assumes that all components are differential, and that you want to solve a standard ode. In this case, the initial component type is set to use_y_diff, so that the y_dot at time t=initial_time is computed by solving the nonlinear problem $F(y_dot,
+y(t0), t0) = 0$ in the variable y_dot.

    Notice that a Newton solver is used for this computation. The Newton solver parameters can be tweaked by acting on ic_alpha and ic_max_iter.

    If you reset the solver at some point, you may want to select a different computation for the initial conditions after reset. Say, for example, that you have refined a grid, and after transferring the solution to the new grid, the initial conditions are no longer consistent. Then you can choose how these are made consistent, using the same three options that you used for the initial conditions in reset_type.

    Parameters
    @@ -541,7 +541,7 @@
    -

    Compute residual. Return $F(t, y, \dot y)$.

    +

    Compute residual. Return $F(t, y, \dot y)$.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, IDA can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 662 of file ida.h.

    @@ -563,13 +563,13 @@

    Compute Jacobian. This function is called by IDA any time a Jacobian update is required. The user should compute the Jacobian (or update all the variables that allow the application of the Jacobian). This function is called by IDA once, before any call to solve_jacobian_system() or solve_with_jacobian().

    The Jacobian $J$ should be a (possibly inexact) computation of

    -\[
+<picture><source srcset=\[
   J=\dfrac{\partial G}{\partial y} = \dfrac{\partial F}{\partial y} +
  \alpha \dfrac{\partial F}{\partial \dot y}.
-\] +\]" src="form_2636.png"/>

    If the user uses a matrix based computation of the Jacobian, then this is the right place where an assembly routine should be called to assemble both a matrix and a preconditioner for the Jacobian system. Subsequent calls (possibly more than one) to solve_jacobian_system() or solve_with_jacobian() can assume that this function has been called at least once.

    -

    Notice that no assumption is made by this interface on what the user should do in this function. IDA only assumes that after a call to setup_jacobian() it is possible to call solve_jacobian_system() or solve_with_jacobian() to obtain a solution $x$ to the system $J x = b$.

    +

    Notice that no assumption is made by this interface on what the user should do in this function. IDA only assumes that after a call to setup_jacobian() it is possible to call solve_jacobian_system() or solve_with_jacobian() to obtain a solution $x$ to the system $J x = b$.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, IDA can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 701 of file ida.h.

    @@ -591,12 +591,12 @@

    Solve the Jacobian linear system. This function will be called by IDA (possibly several times) after setup_jacobian() has been called at least once. IDA tries to do its best to call setup_jacobian() the minimum amount of times. If convergence can be achieved without updating the Jacobian, then IDA does not call setup_jacobian() again. If, on the contrary, internal IDA convergence tests fail, then IDA calls again setup_jacobian() with updated vectors and coefficients so that successive calls to solve_jacobian_systems() lead to better convergence in the Newton process.

    The jacobian $J$ should be (an approximation of) the system Jacobian

    -\[
+<picture><source srcset=\[
   J=\dfrac{\partial G}{\partial y} = \dfrac{\partial F}{\partial y} +
  \alpha \dfrac{\partial F}{\partial \dot y}.
-\] +\]" src="form_2636.png"/>

    -

    A call to this function should store in dst the result of $J^{-1}$ applied to src, i.e., J*dst = src. It is the users responsibility to set up proper solvers and preconditioners inside this function.

    +

    A call to this function should store in dst the result of $J^{-1}$ applied to src, i.e., J*dst = src. It is the users responsibility to set up proper solvers and preconditioners inside this function.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, IDA can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    Deprecated
    Use solve_with_jacobian() instead which also uses a numerical tolerance.
    @@ -619,21 +619,21 @@

    Solve the Jacobian linear system up to a specified tolerance. This function will be called by IDA (possibly several times) after setup_jacobian() has been called at least once. IDA tries to do its best to call setup_jacobian() the minimum number of times. If convergence can be achieved without updating the Jacobian, then IDA does not call setup_jacobian() again. If, on the contrary, internal IDA convergence tests fail, then IDA calls again setup_jacobian() with updated vectors and coefficients so that successive calls to solve_with_jacobian() lead to better convergence in the Newton process.

    The Jacobian $J$ should be (an approximation of) the system Jacobian

    -\[
+<picture><source srcset=\[
   J=\dfrac{\partial G}{\partial y} = \dfrac{\partial F}{\partial y} +
  \alpha \dfrac{\partial F}{\partial \dot y}.
-\] +\]" src="form_2636.png"/>

    Arguments to the function are:

    Parameters
    - +
    [in]rhsThe system right hand side to solve for.
    [out]dstThe solution of $J^{-1} * src$.
    [out]dstThe solution of $J^{-1} * src$.
    [in]toleranceThe tolerance with which to solve the linear system of equations.
    -

    A call to this function should store in dst the result of $J^{-1}$ applied to src, i.e., the solution of the linear system J*dst = src. It is the user's responsibility to set up proper solvers and preconditioners either inside this function, or already within the setup_jacobian() function. (The latter is, for example, what the step-77 program does: All expensive operations happen in setup_jacobian(), given that that function is called far less often than the current one.)

    +

    A call to this function should store in dst the result of $J^{-1}$ applied to src, i.e., the solution of the linear system J*dst = src. It is the user's responsibility to set up proper solvers and preconditioners either inside this function, or already within the setup_jacobian() function. (The latter is, for example, what the step-77 program does: All expensive operations happen in setup_jacobian(), given that that function is called far less often than the current one.)

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, IDA can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 781 of file ida.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 2024-03-17 21:57:36.535191632 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 2024-03-17 21:57:36.535191632 +0000 @@ -551,8 +551,8 @@

    Type of correction for initial conditions.

    -

    If you do not provide consistent initial conditions, (i.e., conditions for which $F(y_dot(0), y(0), 0) = 0$), you can ask SUNDIALS to compute initial conditions for you by using the ic_type parameter at construction time.

    -

    Notice that you could in principle use this capabilities to solve for steady state problems by setting y_dot to zero, and asking to compute $y(0)$ that satisfies $F(0, y(0), 0) = 0$, however the nonlinear solver used inside IDA may not be robust enough for complex problems with several millions unknowns.

    +

    If you do not provide consistent initial conditions, (i.e., conditions for which $F(y_dot(0), y(0), 0) = 0$), you can ask SUNDIALS to compute initial conditions for you by using the ic_type parameter at construction time.

    +

    Notice that you could in principle use this capabilities to solve for steady state problems by setting y_dot to zero, and asking to compute $y(0)$ that satisfies $F(0, y(0), 0) = 0$, however the nonlinear solver used inside IDA may not be robust enough for complex problems with several millions unknowns.

    Definition at line 523 of file ida.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 2024-03-17 21:57:36.571191855 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 2024-03-17 21:57:36.571191855 +0000 @@ -175,48 +175,48 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
    class SUNDIALS::KINSOL< VectorType >

    Interface to SUNDIALS' nonlinear solver (KINSOL).

    -

    KINSOL is a solver for nonlinear algebraic systems in residual form $F(u)
-= 0$ or fixed point form $G(u) = u$, where $u$ is a vector which we will assume to be in ${\mathbb R}^n$ or ${\mathbb C}^n$, but that may also have a block structure and may be distributed in parallel computations; the functions $F$ and $G$ satisfy $F,G:{\mathbb R}^N \to{\mathbb R}^N$ or $F,G:{\mathbb C}^N \to{\mathbb C}^N$. It includes a Newton-Krylov solver as well as Picard and fixed point solvers, both of which can be accelerated with Anderson acceleration. KINSOL is based on the previous Fortran package NKSOL of Brown and Saad. An example of using KINSOL can be found in the step-77 tutorial program.

    +

    KINSOL is a solver for nonlinear algebraic systems in residual form $F(u)
+= 0$ or fixed point form $G(u) = u$, where $u$ is a vector which we will assume to be in ${\mathbb R}^n$ or ${\mathbb C}^n$, but that may also have a block structure and may be distributed in parallel computations; the functions $F$ and $G$ satisfy $F,G:{\mathbb R}^N \to{\mathbb R}^N$ or $F,G:{\mathbb C}^N \to{\mathbb C}^N$. It includes a Newton-Krylov solver as well as Picard and fixed point solvers, both of which can be accelerated with Anderson acceleration. KINSOL is based on the previous Fortran package NKSOL of Brown and Saad. An example of using KINSOL can be found in the step-77 tutorial program.

    KINSOL's Newton solver employs the inexact Newton method. As this solver is intended mainly for large systems, the user is required to provide their own solver function.

    At the highest level, KINSOL implements the following iteration scheme:

      -
    • set $u_0$ = an initial guess
    • -
    • For $n = 0, 1, 2, \ldots$ until convergence do:
        -
      • Solve $J(u_n)\delta_n = -F(u_n)$
      • -
      • Set $u_{n+1} = u_n + \lambda \delta_n, 0 < \lambda \leq 1$
      • +
      • set $u_0$ = an initial guess
      • +
      • For $n = 0, 1, 2, \ldots$ until convergence do:
          +
        • Solve $J(u_n)\delta_n = -F(u_n)$
        • +
        • Set $u_{n+1} = u_n + \lambda \delta_n, 0 < \lambda \leq 1$
        • Test for convergence
      -

      Here, $u_n$ is the $n$-th iterate to $u$, and $J(u) = \nabla_u F(u)$ is the system Jacobian. At each stage in the iteration process, a scalar multiple of the step $\delta_n$, is added to $u_n$ to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

      +

      Here, $u_n$ is the $n$-th iterate to $u$, and $J(u) = \nabla_u F(u)$ is the system Jacobian. At each stage in the iteration process, a scalar multiple of the step $\delta_n$, is added to $u_n$ to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

      Unless specified otherwise by the user, KINSOL strives to update Jacobian information as infrequently as possible to balance the high costs of matrix operations against other costs. Specifically, these updates occur when:

      • the problem is initialized,
      • -
      • $\|\lambda \delta_{n-1} \|_{D_u,\infty} \geq 1.5$ (inexact Newton only, see below for a definition of $\| \cdot \|_{D_u,\infty}$)
      • +
      • $\|\lambda \delta_{n-1} \|_{D_u,\infty} \geq 1.5$ (inexact Newton only, see below for a definition of $\| \cdot \|_{D_u,\infty}$)
      • a specified number of nonlinear iterations have passed since the last update,
      • the linear solver failed recoverably with outdated Jacobian information,
      • the global strategy failed with outdated Jacobian information, or
      • -
      • $\|\lambda \delta_{n} \|_{D_u,\infty} \leq $ tolerance with outdated Jacobian information.
      • +
      • $\|\lambda \delta_{n} \|_{D_u,\infty} \leq $ tolerance with outdated Jacobian information.

      KINSOL allows changes to the above strategy through optional solver inputs. The user can disable the initial Jacobian information evaluation or change the default value of the number of nonlinear iterations after which a Jacobian information update is enforced.

      -

      To address the case of ill-conditioned nonlinear systems, KINSOL allows prescribing scaling factors both for the solution vector and for the residual vector. For scaling to be used, the user may supply the function get_solution_scaling(), that returns values $D_u$, which are diagonal elements of the scaling matrix such that $D_u u_n$ has all components roughly the same magnitude when $u_n$ is close to a solution, and get_function_scaling(), that supply values $D_F$, which are diagonal scaling matrix elements such that $D_F F$ has all components roughly the same magnitude when $u_n$ is not too close to a solution.

      +

      To address the case of ill-conditioned nonlinear systems, KINSOL allows prescribing scaling factors both for the solution vector and for the residual vector. For scaling to be used, the user may supply the function get_solution_scaling(), that returns values $D_u$, which are diagonal elements of the scaling matrix such that $D_u u_n$ has all components roughly the same magnitude when $u_n$ is close to a solution, and get_function_scaling(), that supply values $D_F$, which are diagonal scaling matrix elements such that $D_F F$ has all components roughly the same magnitude when $u_n$ is not too close to a solution.

      When scaling values are provided for the solution vector, these values are automatically incorporated into the calculation of the perturbations used for the default difference quotient approximations for Jacobian information if the user does not supply a Jacobian solver through the solve_with_jacobian() function.

      -

      Two methods of applying a computed step $\delta_n$ to the previously computed solution vector are implemented. The first and simplest is the standard Newton strategy which applies the update with a constant $\lambda$ always set to 1. The other method is a global strategy, which attempts to use the direction implied by $\delta_n$ in the most efficient way for furthering convergence of the nonlinear problem. This technique is implemented in the second strategy, called Linesearch. This option employs both the $\alpha$ and $\beta$ conditions of the Goldstein-Armijo linesearch algorithm given in [DennisSchnabel96] , where $\lambda$ is chosen to guarantee a sufficient decrease in $F$ relative to the step length as well as a minimum step length relative to the initial rate of decrease of $F$. One property of the algorithm is that the full Newton step tends to be taken close to the solution.

      +

      Two methods of applying a computed step $\delta_n$ to the previously computed solution vector are implemented. The first and simplest is the standard Newton strategy which applies the update with a constant $\lambda$ always set to 1. The other method is a global strategy, which attempts to use the direction implied by $\delta_n$ in the most efficient way for furthering convergence of the nonlinear problem. This technique is implemented in the second strategy, called Linesearch. This option employs both the $\alpha$ and $\beta$ conditions of the Goldstein-Armijo linesearch algorithm given in [DennisSchnabel96] , where $\lambda$ is chosen to guarantee a sufficient decrease in $F$ relative to the step length as well as a minimum step length relative to the initial rate of decrease of $F$. One property of the algorithm is that the full Newton step tends to be taken close to the solution.

      The basic fixed-point iteration scheme implemented in KINSOL is given by:

        -
      • Set $u_0 =$ an initial guess
      • -
      • For $n = 0, 1, 2, \dots$ until convergence do:
          -
        • Set $u_{n+1} = G(u_n)$
        • +
        • Set $u_0 =$ an initial guess
        • +
        • For $n = 0, 1, 2, \dots$ until convergence do:
            +
          • Set $u_{n+1} = G(u_n)$
          • Test for convergence
        -

        At each stage in the iteration process, function $G$ is applied to the current iterate to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

        -

        For Picard iteration, as implemented in KINSOL, we consider a special form of the nonlinear function $F$, such that $F(u) = Lu - N(u)$, where $L$ is a constant nonsingular matrix and $N$ is (in general) nonlinear.

        -

        Then the fixed-point function $G$ is defined as $G(u) = u - L^{-1}F(u)$. Within each iteration, the Picard step is computed then added to $u_n$ to produce the new iterate. Next, the nonlinear residual function is evaluated at the new iterate, and convergence is checked. The Picard and fixed point methods can be significantly accelerated using Anderson's acceleration method.

        +

        At each stage in the iteration process, function $G$ is applied to the current iterate to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

        +

        For Picard iteration, as implemented in KINSOL, we consider a special form of the nonlinear function $F$, such that $F(u) = Lu - N(u)$, where $L$ is a constant nonsingular matrix and $N$ is (in general) nonlinear.

        +

        Then the fixed-point function $G$ is defined as $G(u) = u - L^{-1}F(u)$. Within each iteration, the Picard step is computed then added to $u_n$ to produce the new iterate. Next, the nonlinear residual function is evaluated at the new iterate, and convergence is checked. The Picard and fixed point methods can be significantly accelerated using Anderson's acceleration method.

        The user has to provide the implementation of the following std::functions:

        • reinit_vector; and only one of
        • residual; or
        • iteration_function;
        -

        Specifying residual() allows the user to use Newton and Picard strategies (i.e., $F(u)=0$ will be solved), while specifying iteration_function(), a fixed point iteration will be used (i.e., $G(u)=u$ will be solved). An error will be thrown if iteration_function() is set for Picard or Newton.

        +

        Specifying residual() allows the user to use Newton and Picard strategies (i.e., $F(u)=0$ will be solved), while specifying iteration_function(), a fixed point iteration will be used (i.e., $G(u)=u$ will be solved). An error will be thrown if iteration_function() is set for Picard or Newton.

        If the use of a Newton or Picard method is desired, then the user should also supply

        • solve_jacobian_system or solve_with_jacobian; and optionally
        • setup_jacobian;
        • @@ -440,12 +440,12 @@

          A function object that users may supply and that is intended to prepare the linear solver for subsequent calls to solve_with_jacobian().

          The job of setup_jacobian() is to prepare the linear solver for subsequent calls to solve_with_jacobian(), in the solution of linear systems $Ax = b$. The exact nature of this system depends on the SolutionStrategy that has been selected.

          In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial
-F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$. If strategy = SolutionStrategy::fixed_point, then linear systems do not arise, and this function is never called.

          +F/\partial u$" src="form_2213.png"/>. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$. If strategy = SolutionStrategy::fixed_point, then linear systems do not arise, and this function is never called.

          The setup_jacobian() function may call a user-supplied function, or a function within the linear solver module, to compute Jacobian-related data that is required by the linear solver. It may also preprocess that data as needed for solve_with_jacobian(), which may involve calling a generic function (such as for LU factorization) or, more generally, build preconditioners from the assembled Jacobian. In any case, the data so generated may then be used whenever a linear system is solved.

          The point of this function is that setup_jacobian() function is not called at every Newton iteration, but only as frequently as the solver determines that it is appropriate to perform the setup task. In this way, Jacobian-related data generated by setup_jacobian() is expected to be used over a number of Newton iterations. KINSOL determines itself when it is beneficial to regenerate the Jacobian and associated information (such as preconditioners computed for the Jacobian), thereby saving the effort to regenerate the Jacobian matrix and a preconditioner for it whenever possible.

          Parameters
          - +
          current_uCurrent value of $u$
          current_uCurrent value of $u$
          current_fCurrent value of $F(u)$ or $G(u)$
          @@ -472,14 +472,14 @@
          Deprecated
          Versions of SUNDIALS after 4.0 no longer provide all of the information necessary for this callback (see below). Use the solve_with_jacobian callback described below.

          A function object that users may supply and that is intended to solve a linear system with the Jacobian matrix. This function will be called by KINSOL (possibly several times) after setup_jacobian() has been called at least once. KINSOL tries to do its best to call setup_jacobian() the minimum number of times. If convergence can be achieved without updating the Jacobian, then KINSOL does not call setup_jacobian() again. If, on the contrary, internal KINSOL convergence tests fail, then KINSOL calls setup_jacobian() again with updated vectors and coefficients so that successive calls to solve_jacobian_system() lead to better convergence in the Newton process.

          If you do not specify a solve_jacobian_system or solve_with_jacobian function, then only a fixed point iteration strategy can be used. Notice that this may not converge, or may converge very slowly.

          -

          A call to this function should store in dst the result of $J^{-1}$ applied to rhs, i.e., $J \cdot dst = rhs$. It is the user's responsibility to set up proper solvers and preconditioners inside this function (or in the setup_jacobian callback above).

          +

          A call to this function should store in dst the result of $J^{-1}$ applied to rhs, i.e., $J \cdot dst = rhs$. It is the user's responsibility to set up proper solvers and preconditioners inside this function (or in the setup_jacobian callback above).

          Arguments to the function are:

          Parameters
          - - + + - +
          [in]ycurThe current $y$ vector for the current KINSOL internal step. In the documentation above, this $y$ vector is generally denoted by $u$.
          [in]fcurThe current value of the implicit right-hand side at ycur, $f_I (t_n, ypred)$.
          [in]ycurThe current $y$ vector for the current KINSOL internal step. In the documentation above, this $y$ vector is generally denoted by $u$.
          [in]fcurThe current value of the implicit right-hand side at ycur, $f_I (t_n, ypred)$.
          [in]rhsThe system right hand side to solve for
          [out]dstThe solution of $J^{-1} * src$
          [out]dstThe solution of $J^{-1} * src$
          @@ -510,12 +510,12 @@

    A function object that users may supply and that is intended to solve a linear system with the Jacobian matrix. This function will be called by KINSOL (possibly several times) after setup_jacobian() has been called at least once. KINSOL tries to do its best to call setup_jacobian() the minimum number of times. If convergence can be achieved without updating the Jacobian, then KINSOL does not call setup_jacobian() again. If, on the contrary, internal KINSOL convergence tests fail, then KINSOL calls setup_jacobian() again with updated vectors and coefficients so that successive calls to solve_with_jacobian() lead to better convergence in the Newton process.

    If you do not specify a solve_with_jacobian function, then only a fixed point iteration strategy can be used. Notice that this may not converge, or may converge very slowly.

    -

    A call to this function should store in dst the result of $J^{-1}$ applied to rhs, i.e., $J \cdot dst = rhs$. It is the user's responsibility to set up proper solvers and preconditioners inside this function (or in the setup_jacobian callback above). The function attached to this callback is also provided with a tolerance to the linear solver, indicating that it is not necessary to solve the linear system with the Jacobian matrix exactly, but only to a tolerance that KINSOL will adapt over time.

    +

    A call to this function should store in dst the result of $J^{-1}$ applied to rhs, i.e., $J \cdot dst = rhs$. It is the user's responsibility to set up proper solvers and preconditioners inside this function (or in the setup_jacobian callback above). The function attached to this callback is also provided with a tolerance to the linear solver, indicating that it is not necessary to solve the linear system with the Jacobian matrix exactly, but only to a tolerance that KINSOL will adapt over time.

    Arguments to the function are:

    Parameters
    - +
    [in]rhsThe system right hand side to solve for.
    [out]dstThe solution of $J^{-1} * src$.
    [out]dstThe solution of $J^{-1} * src$.
    [in]toleranceThe tolerance with which to solve the linear system of equations.
    @@ -540,7 +540,7 @@

    A function object that users may supply and that is intended to return a vector whose components are the weights used by KINSOL to compute the vector norm of the solution. The implementation of this function is optional, and it is used only if implemented.

    -

    The intent for this scaling factor is for problems in which the different components of a solution have vastly different numerical magnitudes – typically because they have different physical units and represent different things. For example, if one were to solve a nonlinear Stokes problem, the solution vector has components that correspond to velocities and other components that correspond to pressures. These have different physical units and depending on which units one chooses, they may have roughly comparable numerical sizes or maybe they don't. To give just one example, in simulations of flow in the Earth's interior, one has velocities on the order of maybe ten centimeters per year, and pressures up to around 100 GPa. If one expresses this in SI units, this corresponds to velocities of around $0.000,000,003=3 \times 10^{-9}$ m/s, and pressures around $10^9 \text{kg}/\text{m}/\text{s}^2$, i.e., vastly different. In such cases, computing the $l_2$ norm of a solution-type vector (e.g., the difference between the previous and the current solution) makes no sense because the norm will either be dominated by the velocity components or the pressure components. The scaling vector this function returns is intended to provide each component of the solution with a scaling factor that is generally chosen as the inverse of a "typical velocity" or "typical pressure" so that upon multiplication of a vector component by the corresponding scaling vector component, one obtains a number that is of order of magnitude of one (i.e., a reasonably small multiple of one times the typical velocity/pressure). The KINSOL manual states this as follows: "The user should supply values \_form#href_anchor".

    +

    The intent for this scaling factor is for problems in which the different components of a solution have vastly different numerical magnitudes – typically because they have different physical units and represent different things. For example, if one were to solve a nonlinear Stokes problem, the solution vector has components that correspond to velocities and other components that correspond to pressures. These have different physical units and depending on which units one chooses, they may have roughly comparable numerical sizes or maybe they don't. To give just one example, in simulations of flow in the Earth's interior, one has velocities on the order of maybe ten centimeters per year, and pressures up to around 100 GPa. If one expresses this in SI units, this corresponds to velocities of around $0.000,000,003=3 \times 10^{-9}$ m/s, and pressures around $10^9 \text{kg}/\text{m}/\text{s}^2$, i.e., vastly different. In such cases, computing the $l_2$ norm of a solution-type vector (e.g., the difference between the previous and the current solution) makes no sense because the norm will either be dominated by the velocity components or the pressure components. The scaling vector this function returns is intended to provide each component of the solution with a scaling factor that is generally chosen as the inverse of a "typical velocity" or "typical pressure" so that upon multiplication of a vector component by the corresponding scaling vector component, one obtains a number that is of order of magnitude of one (i.e., a reasonably small multiple of one times the typical velocity/pressure). The KINSOL manual states this as follows: "The user should supply values \_form#href_anchor".

    If no function is provided to a KINSOL object, then this is interpreted as implicitly saying that all of these scaling factors should be considered as one.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    @@ -562,7 +562,7 @@

    A function object that users may supply and that is intended to return a vector whose components are the weights used by KINSOL to compute the vector norm of the function evaluation away from the solution. The implementation of this function is optional, and it is used only if implemented.

    -

    The point of this function and the scaling vector it returns is similar to the one discussed above for get_solution_scaling, except that it is for a vector that scales the components of the function $F(U)$, rather than the components of $U$, when computing norms. As above, if no function is provided, then this is equivalent to using a scaling vector whose components are all equal to one.

    +

    The point of this function and the scaling vector it returns is similar to the one discussed above for get_solution_scaling, except that it is for a vector that scales the components of the function $F(U)$, rather than the components of $U$, when computing norms. As above, if no function is provided, then this is equivalent to using a scaling vector whose components are all equal to one.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 691 of file kinsol.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 2024-03-17 21:57:36.655192373 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 2024-03-17 21:57:36.663192423 +0000 @@ -362,15 +362,15 @@

    Detailed Description

    template<typename NumberType>
    class ScaLAPACKMatrix< NumberType >

    A wrapper class around ScaLAPACK parallel dense linear algebra.

    -

    ScaLAPACK assumes that matrices are distributed according to the block-cyclic decomposition scheme. An $M$ by $N$ matrix is first decomposed into $\lceil M / MB \rceil$ by $\lceil N / NB \rceil$ blocks which are then uniformly distributed across the 2d process grid with $p q \le Np$ processes, where $p,q$ are grid dimensions and $Np$ is the total number of processes. The parameters MB and NB are referred to as row and column block size and determine the granularity of the block-cyclic distribution.

    -

    In the following the block-cyclic distribution of a $10 \times 9$ matrix onto a $3\times 3$ Cartesian process grid with block sizes $\text{MB}=\text{NB}=2$ is displayed.

    +

    ScaLAPACK assumes that matrices are distributed according to the block-cyclic decomposition scheme. An $M$ by $N$ matrix is first decomposed into $\lceil M / MB \rceil$ by $\lceil N / NB \rceil$ blocks which are then uniformly distributed across the 2d process grid with $p q \le Np$ processes, where $p,q$ are grid dimensions and $Np$ is the total number of processes. The parameters MB and NB are referred to as row and column block size and determine the granularity of the block-cyclic distribution.

    +

    In the following the block-cyclic distribution of a $10 \times 9$ matrix onto a $3\times 3$ Cartesian process grid with block sizes $\text{MB}=\text{NB}=2$ is displayed.

    Block-Cyclic Distribution
    -

    Note that the odd number of columns of the local matrices owned by the processes P2, P5 and P8 accounts for $N=9$ not being an integral multiple of $\text{NB}=2$.

    +

    Note that the odd number of columns of the local matrices owned by the processes P2, P5 and P8 accounts for $N=9$ not being an integral multiple of $\text{NB}=2$.

    The choice of the block sizes is a compromise between a sufficiently large size for efficient local/serial BLAS, but one that is also small enough to achieve good parallel load balance.

    Below we show a strong scaling example of ScaLAPACKMatrix::invert() on up to 5 nodes each composed of two Intel Xeon 2660v2 IvyBridge sockets 2.20GHz, 10 cores/socket. Calculations are performed on square processor grids 1x1, 2x2, 3x3, 4x4, 5x5, 6x6, 7x7, 8x8, 9x9, 10x10.

    @@ -615,7 +615,7 @@

    Constructor for a rectangular matrix with n_rows and n_cols and distributed using the grid process_grid.

    -

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 81 of file scalapack.cc.

    @@ -652,7 +652,7 @@

    Constructor for a square matrix of size size, and distributed using the process grid in process_grid.

    -

    The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 106 of file scalapack.cc.

    @@ -690,7 +690,7 @@

    Constructor for a general rectangular matrix that is read from the file filename and distributed using the grid process_grid.

    Loads the matrix from file filename using HDF5. In case that deal.II was built without HDF5 a call to this function will cause an exception to be thrown.

    -

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 122 of file scalapack.cc.

    @@ -766,7 +766,7 @@

    Initialize the rectangular matrix with n_rows and n_cols and distributed using the grid process_grid.

    -

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 217 of file scalapack.cc.

    @@ -803,7 +803,7 @@

    Initialize the square matrix of size size and distributed using the grid process_grid.

    -

    The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 291 of file scalapack.cc.

    @@ -1047,9 +1047,9 @@
    -

    Transposing assignment: $\mathbf{A} = \mathbf{B}^T$

    -

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    -

    The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    +

    Transposing assignment: $\mathbf{A} = \mathbf{B}^T$

    +

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    +

    The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    Definition at line 981 of file scalapack.cc.

    @@ -1087,13 +1087,13 @@ transpose_B Block Sizes Operation -false $MB_A=MB_B$
    - $NB_A=NB_B$ $\mathbf{A} = a \mathbf{A} + b \mathbf{B}$ +false $MB_A=MB_B$
    + $NB_A=NB_B$ $\mathbf{A} = a \mathbf{A} + b \mathbf{B}$ -true $MB_A=NB_B$
    - $NB_A=MB_B$ $\mathbf{A} = a \mathbf{A} + b \mathbf{B}^T$ +true $MB_A=NB_B$
    + $NB_A=MB_B$ $\mathbf{A} = a \mathbf{A} + b \mathbf{B}^T$ -

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    +

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    Definition at line 991 of file scalapack.cc.

    @@ -1116,9 +1116,9 @@ const ScaLAPACKMatrix< NumberType > & B&#href_anchor"memdoc"> -

    Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}$

    -

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    -

    The following alignment conditions have to be fulfilled: $MB_A=MB_B$ and $NB_A=NB_B$.

    +

    Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}$

    +

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    +

    The following alignment conditions have to be fulfilled: $MB_A=MB_B$ and $NB_A=NB_B$.

    Definition at line 1047 of file scalapack.cc.

    @@ -1141,9 +1141,9 @@ const ScaLAPACKMatrix< NumberType > & B&#href_anchor"memdoc"> -

    Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}^T$

    -

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    -

    The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    +

    Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}^T$

    +

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    +

    The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    Definition at line 1057 of file scalapack.cc.

    @@ -1192,24 +1192,24 @@ transpose_A transpose_B Block Sizes Operation -false false $MB_A=MB_C$
    - $NB_A=MB_B$
    - $NB_B=NB_C$ $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B} + c \mathbf{C}$ +false false $MB_A=MB_C$
    + $NB_A=MB_B$
    + $NB_B=NB_C$ $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B} + c \mathbf{C}$ -false true $MB_A=MB_C$
    - $NB_A=NB_B$
    - $MB_B=NB_C$ $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B}^T + c \mathbf{C}$ +false true $MB_A=MB_C$
    + $NB_A=NB_B$
    + $MB_B=NB_C$ $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B}^T + c \mathbf{C}$ -true false $MB_A=MB_B$
    - $NB_A=MB_C$
    - $NB_B=NB_C$ $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B} + c \mathbf{C}$ +true false $MB_A=MB_B$
    + $NB_A=MB_C$
    + $NB_B=NB_C$ $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B} + c \mathbf{C}$ -true true $MB_A=NB_B$
    - $NB_A=MB_C$
    - $MB_B=NB_C$ $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B}^T + c \mathbf{C}$ +true true $MB_A=NB_B$
    + $NB_A=MB_C$
    + $MB_B=NB_C$ $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B}^T + c \mathbf{C}$ -

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    -

    The matrices $\mathbf{A}$, $\mathbf{B}$ and $\mathbf{C}$ must have the same process grid.

    +

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    +

    The matrices $\mathbf{A}$, $\mathbf{B}$ and $\mathbf{C}$ must have the same process grid.

    Definition at line 1067 of file scalapack.cc.

    @@ -1238,11 +1238,11 @@ const bool adding = false&#href_anchor"memdoc">

    Matrix-matrix-multiplication.

    -

    The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$.

    -

    if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A} \cdot \mathbf{B}$

    -

    else $\mathbf{C} = \mathbf{A} \cdot \mathbf{B}$

    -

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    -

    The following alignment conditions have to be fulfilled: $MB_A=MB_C$, $NB_A=MB_B$ and $NB_B=NB_C$.

    +

    The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$.

    +

    if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A} \cdot \mathbf{B}$

    +

    else $\mathbf{C} = \mathbf{A} \cdot \mathbf{B}$

    +

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    +

    The following alignment conditions have to be fulfilled: $MB_A=MB_C$, $NB_A=MB_B$ and $NB_B=NB_C$.

    Definition at line 1184 of file scalapack.cc.

    @@ -1271,11 +1271,11 @@ const bool adding = false&#href_anchor"memdoc">

    Matrix-matrix-multiplication using transpose of $\mathbf{A}$.

    -

    The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$.

    -

    if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A}^T \cdot \mathbf{B}$

    -

    else $\mathbf{C} = \mathbf{A}^T \cdot \mathbf{B}$

    -

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    -

    The following alignment conditions have to be fulfilled: $MB_A=MB_B$, $NB_A=MB_C$ and $NB_B=NB_C$.

    +

    The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$.

    +

    if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A}^T \cdot \mathbf{B}$

    +

    else $\mathbf{C} = \mathbf{A}^T \cdot \mathbf{B}$

    +

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    +

    The following alignment conditions have to be fulfilled: $MB_A=MB_B$, $NB_A=MB_C$ and $NB_B=NB_C$.

    Definition at line 1198 of file scalapack.cc.

    @@ -1303,12 +1303,12 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 2024-03-17 21:57:36.711192719 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 2024-03-17 21:57:36.719192769 +0000 @@ -259,7 +259,7 @@
    Vector<double> solution_1d;
    -

    We will denote this solution function described by this DoFHandler and vector object by $u_h(x)$ where $x$ is a vector with just one component, and consequently is not shown in boldface. Then assume that we want this $u_h(x)$ to be used as a boundary condition for a 2d problem at the line $y=0$. Let's say that this line corresponds to boundary indicator 123. If we say that the 2d problem is associated with

    DoFHandler<2> dof_handler_2d;
    +

    We will denote this solution function described by this DoFHandler and vector object by $u_h(x)$ where $x$ is a vector with just one component, and consequently is not shown in boldface. Then assume that we want this $u_h(x)$ to be used as a boundary condition for a 2d problem at the line $y=0$. Let's say that this line corresponds to boundary indicator 123. If we say that the 2d problem is associated with

    DoFHandler<2> dof_handler_2d;

    then in order to evaluate the boundary conditions for this 2d problem, we would want to call VectorTools::interpolate_boundary_values() via

    AffineConstraints<double> boundary_values_2d;
    123,
    @@ -267,7 +267,7 @@
    boundary_values_2d);
    void interpolate_boundary_values(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > * > &function_map, std::map< types::global_dof_index, number > &boundary_values, const ComponentMask &component_mask=ComponentMask())
    -

    The question here is what to use as the Function object that can be passed as third argument. It needs to be a Function<2> object, i.e., it receives a 2d input point and is supposed to return the value at that point. What we want it to do is to just take the $x$ component of the input point and evaluate the 1d solution at that point, knowing that at the boundary with indicator 123, the $y$ component of the input point must be zero. This all can be achieved via the following function object:

    The question here is what to use as the Function object that can be passed as third argument. It needs to be a Function<2> object, i.e., it receives a 2d input point and is supposed to return the value at that point. What we want it to do is to just take the $x$ component of the input point and evaluate the 1d solution at that point, knowing that at the boundary with indicator 123, the $y$ component of the input point must be zero. This all can be achieved via the following function object:

    solution_1d_as_function_object (dof_handler_1d, solution_1d);
    auto boundary_evaluator
    = [&] (const Point<2> &p)
    /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 2024-03-17 21:57:36.755192990 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 2024-03-17 21:57:36.759193015 +0000 @@ -212,7 +212,7 @@ \rho^{(k)} &\dealcoloneq \frac{1}{y^{(k)} \cdot s^{(k)}} \end{align*}" src="form_2417.png"/>

    -

    for a symmetric positive definite $H$. Limited memory variant is implemented via the two-loop recursion.

    +

    for a symmetric positive definite $H$. Limited memory variant is implemented via the two-loop recursion.

    Definition at line 58 of file solver_bfgs.h.

    Member Typedef Documentation

    @@ -366,8 +366,8 @@ \]" src="form_2418.png"/>

    starting from initial state x.

    -

    The function compute takes two arguments indicating the values of $x$ and of the gradient $g=\nabla f(\mathbf x)=\frac{\partial f}{\partial
-\mathbf x}$. When called, it needs to update the gradient $g$ at the given location $x$ and return the value of the function being minimized, i.e., $f(\mathbf x)$.

    +

    The function compute takes two arguments indicating the values of $x$ and of the gradient $g=\nabla f(\mathbf x)=\frac{\partial f}{\partial
+\mathbf x}$. When called, it needs to update the gradient $g$ at the given location $x$ and return the value of the function being minimized, i.e., $f(\mathbf x)$.

    @@ -388,7 +388,7 @@

    Connect a slot to perform a custom line-search.

    -

    Given the value of function f, the current value of unknown x, the gradient g and the search direction p, return the size $\alpha$ of the step $x \leftarrow x + \alpha p$, and update x, g and f accordingly.

    +

    Given the value of function f, the current value of unknown x, the gradient g and the search direction p, return the size $\alpha$ of the step $x \leftarrow x + \alpha p$, and update x, g and f accordingly.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverCG.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverCG.html 2024-03-17 21:57:36.799193263 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverCG.html 2024-03-17 21:57:36.807193312 +0000 @@ -1034,7 +1034,7 @@
    -

    Flag to indicate whether the classical Fletcher–Reeves update formula for the parameter $\beta_k$ (standard CG algorithm, minimal storage needs) or the flexible conjugate gradient method with Polak-Ribiere formula for $\beta_k$ should be used. This base class implementation of SolverCG will always use the former method, whereas the derived class SolverFlexibleCG will use the latter.

    +

    Flag to indicate whether the classical Fletcher–Reeves update formula for the parameter $\beta_k$ (standard CG algorithm, minimal storage needs) or the flexible conjugate gradient method with Polak-Ribiere formula for $\beta_k$ should be used. This base class implementation of SolverCG will always use the former method, whereas the derived class SolverFlexibleCG will use the latter.

    Definition at line 323 of file solver_cg.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 2024-03-17 21:57:36.851193584 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 2024-03-17 21:57:36.851193584 +0000 @@ -196,27 +196,27 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
    -class SolverFIRE< VectorType >

    FIRE (Fast Inertial Relaxation Engine) for minimization of (potentially non-linear) objective function $E(\mathbf x)$, $\mathbf x$ is a vector of $n$ variables ( $n$ is the number of variables of the objective function). Like all other solver classes, it can work on any kind of vector and matrix as long as they satisfy certain requirements (for the requirements on matrices and vectors in order to work with this class, see the documentation of the Solver base class). The type of the solution vector must be passed as template argument, and defaults to Vector<double>.

    +class SolverFIRE< VectorType >

    FIRE (Fast Inertial Relaxation Engine) for minimization of (potentially non-linear) objective function $E(\mathbf x)$, $\mathbf x$ is a vector of $n$ variables ( $n$ is the number of variables of the objective function). Like all other solver classes, it can work on any kind of vector and matrix as long as they satisfy certain requirements (for the requirements on matrices and vectors in order to work with this class, see the documentation of the Solver base class). The type of the solution vector must be passed as template argument, and defaults to Vector<double>.

    FIRE is a damped dynamics method described in Structural Relaxation Made Simple by Bitzek et al. 2006, typically used to find stable equilibrium configurations of atomistic systems in computational material science. Starting from a given initial configuration of the atomistic system, the algorithm relies on inertia to obtain (nearest) configuration with least potential energy.

    Notation:

      -
    • The global vector of unknown variables: $\mathbf x$.
    • -
    • Objective function: $E(\mathbf x)$.
    • +
    • The global vector of unknown variables: $\mathbf x$.
    • +
    • Objective function: $E(\mathbf x)$.
    • Rate of change of unknowns: $\mathbf v$.
    • -
    • Gradient of the objective function w.r.t unknowns: $\mathbf g = \nabla E(\mathbf x)$.
    • -
    • Mass matrix: $\mathbf M$.
    • -
    • Initial guess of unknowns: $\mathbf x_0$.
    • -
    • Time step: $\Delta t$.
    • +
    • Gradient of the objective function w.r.t unknowns: $\mathbf g = \nabla E(\mathbf x)$.
    • +
    • Mass matrix: $\mathbf M$.
    • +
    • Initial guess of unknowns: $\mathbf x_0$.
    • +
    • Time step: $\Delta t$.
    -

    Given initial values for $\Delta t$, $\alpha = \alpha_0$, $\epsilon$, $\mathbf x = \mathbf x_0$ and $\mathbf v= \mathbf 0$ along with a given mass matrix $\mathbf M$, FIRE algorithm is as follows,

      -
    1. Calculate $\mathbf g = \nabla E(\mathbf x)$ and check for convergence ( $\mathbf g \cdot \mathbf g < \epsilon^2 $).
    2. -
    3. Update $\mathbf x$ and $V$ using simple (forward) Euler integration step,
      - $\mathbf x = \mathbf x + \Delta t \mathbf v$,
      - $\mathbf v = \mathbf v + \Delta t \mathbf M^{-1} \cdot \mathbf g$.
    4. -
    5. Calculate $p = \mathbf g \cdot \mathbf v$.
    6. -
    7. Set $\mathbf v = (1-\alpha) \mathbf v
-                  + \alpha \frac{|\mathbf v|}{|\mathbf g|} \mathbf g$.
    8. -
    9. If $p<0$ and number of steps since $p$ was last negative is larger than certain value, then increase time step $\Delta t$ and decrease $\alpha$.
    10. -
    11. If $p>0$, then decrease the time step, freeze the system i.e., $\mathbf v = \mathbf 0$ and reset $\alpha = \alpha_0$.
    12. +

      Given initial values for $\Delta t$, $\alpha = \alpha_0$, $\epsilon$, $\mathbf x = \mathbf x_0$ and $\mathbf v= \mathbf 0$ along with a given mass matrix $\mathbf M$, FIRE algorithm is as follows,

        +
      1. Calculate $\mathbf g = \nabla E(\mathbf x)$ and check for convergence ( $\mathbf g \cdot \mathbf g < \epsilon^2 $).
      2. +
      3. Update $\mathbf x$ and $V$ using simple (forward) Euler integration step,
        + $\mathbf x = \mathbf x + \Delta t \mathbf v$,
        + $\mathbf v = \mathbf v + \Delta t \mathbf M^{-1} \cdot \mathbf g$.
      4. +
      5. Calculate $p = \mathbf g \cdot \mathbf v$.
      6. +
      7. Set $\mathbf v = (1-\alpha) \mathbf v
+                  + \alpha \frac{|\mathbf v|}{|\mathbf g|} \mathbf g$.
      8. +
      9. If $p<0$ and number of steps since $p$ was last negative is larger than certain value, then increase time step $\Delta t$ and decrease $\alpha$.
      10. +
      11. If $p>0$, then decrease the time step, freeze the system i.e., $\mathbf v = \mathbf 0$ and reset $\alpha = \alpha_0$.
      12. Return to 1.

      Also see Energy-Minimization in Atomic-to-Continuum Scale-Bridging Methods by Eidel et al. 2011.

      @@ -406,8 +406,8 @@ const PreconditionerType & preconditioner&#href_anchor"memdoc"> -

      Solve for x that minimizes $E(\mathbf x)$ for the special case when $E(\mathbf x)
-= \frac{1}{2} \mathbf x^{T} \mathbf A \mathbf x - \mathbf x^{T} \mathbf b$.

      +

      Solve for x that minimizes $E(\mathbf x)$ for the special case when $E(\mathbf x)
+= \frac{1}{2} \mathbf x^{T} \mathbf A \mathbf x - \mathbf x^{T} \mathbf b$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html 2024-03-17 21:57:36.891193831 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html 2024-03-17 21:57:36.891193831 +0000 @@ -217,11 +217,11 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
    -class SolverFlexibleCG< VectorType >

    This class implements a flexible variant of the conjugate gradient method, which is based on a different formula to compute $\beta_k$ in the process of constructing a new search direction that is A-orthogonal against the previous one. Rather than using the Fletcher–Reeves update formula with $\beta_k = \frac{\mathbf{r}^T_{k+1} \mathbf{z}_{k+1}}{\mathbf{r}^T_{k}
-\mathbf{z}_{k}}$ for computing the new search direction (here $\mathbf{r}_{k+1}$ is the residual in step $k+1$ and $\mathbf{z}_{k+1} =
-P^{-1} \mathbf{r}_{k+1}$) as in the classical conjugate gradient algorithm, this class selects the Polak-Ribiere formula $\beta_k =
+class SolverFlexibleCG< VectorType ></div><p>This class implements a flexible variant of the conjugate gradient method, which is based on a different formula to compute <picture><source srcset=$\beta_k$ in the process of constructing a new search direction that is A-orthogonal against the previous one. Rather than using the Fletcher–Reeves update formula with $\beta_k = \frac{\mathbf{r}^T_{k+1} \mathbf{z}_{k+1}}{\mathbf{r}^T_{k}
+\mathbf{z}_{k}}$ for computing the new search direction (here $\mathbf{r}_{k+1}$ is the residual in step $k+1$ and $\mathbf{z}_{k+1} =
+P^{-1} \mathbf{r}_{k+1}$) as in the classical conjugate gradient algorithm, this class selects the Polak-Ribiere formula $\beta_k =
 \frac{\mathbf{r}^T_{k+1} \left(\mathbf{z}_{k+1} -
-\mathbf{z}_{k}\right)}{\mathbf{r}^T_{k} \mathbf{z}_{k}}$. The additional term $\mathbf{r}^T_{k+1} \mathbf{z}_{k}$ is zero for linear symmetric-positive definite preconditioners due to the construction of the search directions, so the behavior of SolverFlexibleCG is equivalent for those kinds of situations and merely increases costs by requiring an additional stored vector and associated vector operations. While there are no theoretical guarantees for convergence as in the classical CG algorithm, the current class has been documented to be much more robust for variable preconditioners (e.g., involving some iterative inverse that is not fully converged) or a preconditioner with some slight non-symmetry (like weighted Schwarz methods), which results from the local optimality of the search direction with at least as good progress as the locally optimal steepest descent method.

    +\mathbf{z}_{k}\right)}{\mathbf{r}^T_{k} \mathbf{z}_{k}}$" src="form_1891.png"/>. The additional term $\mathbf{r}^T_{k+1} \mathbf{z}_{k}$ is zero for linear symmetric-positive definite preconditioners due to the construction of the search directions, so the behavior of SolverFlexibleCG is equivalent for those kinds of situations and merely increases costs by requiring an additional stored vector and associated vector operations. While there are no theoretical guarantees for convergence as in the classical CG algorithm, the current class has been documented to be much more robust for variable preconditioners (e.g., involving some iterative inverse that is not fully converged) or a preconditioner with some slight non-symmetry (like weighted Schwarz methods), which results from the local optimality of the search direction with at least as good progress as the locally optimal steepest descent method.

    Definition at line 354 of file solver_cg.h.

    Member Typedef Documentation

    @@ -995,7 +995,7 @@
    -

    Flag to indicate whether the classical Fletcher–Reeves update formula for the parameter $\beta_k$ (standard CG algorithm, minimal storage needs) or the flexible conjugate gradient method with Polak-Ribiere formula for $\beta_k$ should be used. This base class implementation of SolverCG will always use the former method, whereas the derived class SolverFlexibleCG will use the latter.

    +

    Flag to indicate whether the classical Fletcher–Reeves update formula for the parameter $\beta_k$ (standard CG algorithm, minimal storage needs) or the flexible conjugate gradient method with Polak-Ribiere formula for $\beta_k$ should be used. This base class implementation of SolverCG will always use the former method, whereas the derived class SolverFlexibleCG will use the latter.

    Definition at line 323 of file solver_cg.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 2024-03-17 21:57:36.931194079 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 2024-03-17 21:57:36.935194102 +0000 @@ -427,7 +427,7 @@ const PreconditionerType & preconditioner&#href_anchor"memdoc"> -

    Solve $A^Tx=b$ for $x$.

    +

    Solve $A^Tx=b$ for $x$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html 2024-03-17 21:57:36.975194350 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html 2024-03-17 21:57:36.983194399 +0000 @@ -576,7 +576,7 @@

    The solution will be returned in place of the right hand side vector.

    Parameters
    - +
    [in,out]rhs_and_solutionA vector that contains the right hand side $b$ of a linear system $Ax=b$ upon calling this function, and that contains the solution $x$ of the linear system after calling this function.
    [in,out]rhs_and_solutionA vector that contains the right hand side $b$ of a linear system $Ax=b$ upon calling this function, and that contains the solution $x$ of the linear system after calling this function.
    [in]transposeIf set to true, this function solves the linear $A^T x = b$ instead of $Ax=b$.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html 2024-03-17 21:57:37.059194868 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html 2024-03-17 21:57:37.063194893 +0000 @@ -1901,7 +1901,7 @@
    -

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    +

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by SparsityPattern::symmetrize().

    @@ -2153,7 +2153,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2186,7 +2186,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2263,7 +2263,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

    -

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    +

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

    @@ -2334,8 +2334,8 @@
    -

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

    +

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

    @@ -2363,8 +2363,8 @@
    -

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 2024-03-17 21:57:37.143195387 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 2024-03-17 21:57:37.147195412 +0000 @@ -1704,7 +1704,7 @@
    -

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    +

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by SparsityPattern::symmetrize().

    @@ -2049,7 +2049,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2082,7 +2082,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2159,7 +2159,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

    -

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    +

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

    @@ -2230,8 +2230,8 @@
    -

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

    +

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

    @@ -2259,8 +2259,8 @@
    -

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html 2024-03-17 21:57:37.231195932 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html 2024-03-17 21:57:37.235195955 +0000 @@ -426,8 +426,8 @@
    template<typename number>
    class SparseMIC< number >

    Implementation of the Modified Incomplete Cholesky (MIC(0)) preconditioner for symmetric matrices. This class conforms to the state and usage specification in SparseLUDecomposition.

    The decomposition

    -

    Let a symmetric, positive-definite, sparse matrix $A$ be in the form $A = D
-- L - L^T$, where $D$ is the diagonal part of $A$ and $-L$ is a strictly lower triangular matrix. The MIC(0) decomposition of the matrix $A$ is defined by $B = (X-L)X^{-1}(X-L^T)$, where $X$ is a diagonal matrix defined by the condition $\text{rowsum}(A) = \text{rowsum}(B)$.

    +

    Let a symmetric, positive-definite, sparse matrix $A$ be in the form $A = D
+- L - L^T$, where $D$ is the diagonal part of $A$ and $-L$ is a strictly lower triangular matrix. The MIC(0) decomposition of the matrix $A$ is defined by $B = (X-L)X^{-1}(X-L^T)$, where $X$ is a diagonal matrix defined by the condition $\text{rowsum}(A) = \text{rowsum}(B)$.

    Definition at line 46 of file sparse_mic.h.

    Member Typedef Documentation

    @@ -1966,7 +1966,7 @@
    -

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    +

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by SparsityPattern::symmetrize().

    @@ -2218,7 +2218,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2251,7 +2251,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2328,7 +2328,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

    -

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    +

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

    @@ -2399,8 +2399,8 @@
    -

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

    +

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

    @@ -2428,8 +2428,8 @@
    -

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 2024-03-17 21:57:37.311196425 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 2024-03-17 21:57:37.315196450 +0000 @@ -1452,7 +1452,7 @@
    -

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    +

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by SparsityPattern::symmetrize().

    @@ -1812,7 +1812,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -1838,7 +1838,7 @@ const Vector< somenumber > & v&#href_anchor"memdoc"> -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -1901,7 +1901,7 @@ const bool rebuild_sparsity_pattern = true&#href_anchor"memdoc">

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

    -

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    +

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

    @@ -1957,8 +1957,8 @@
    -

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

    +

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

    @@ -1978,8 +1978,8 @@
    -

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 2024-03-17 21:57:37.375196820 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 2024-03-17 21:57:37.383196869 +0000 @@ -1206,7 +1206,7 @@ const Vector< somenumber > & src&#href_anchor"memdoc"> -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    @@ -1229,7 +1229,7 @@ const Vector< somenumber > & src&#href_anchor"memdoc"> -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult but takes the transposed matrix.

    @@ -1252,7 +1252,7 @@ const Vector< somenumber > & src&#href_anchor"memdoc"> -

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    +

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    @@ -1275,7 +1275,7 @@ const Vector< somenumber > & src&#href_anchor"memdoc"> -

    Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add but takes the transposed matrix.

    +

    Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add but takes the transposed matrix.

    @@ -1384,7 +1384,7 @@ const number om = 1.&#href_anchor"memdoc"> -

    Apply SOR preconditioning matrix to src. The result of this method is $dst = (om D - L)^{-1} src$.

    +

    Apply SOR preconditioning matrix to src. The result of this method is $dst = (om D - L)^{-1} src$.

    @@ -1412,7 +1412,7 @@ const number om = 1.&#href_anchor"memdoc"> -

    Apply transpose SOR preconditioning matrix to src. The result of this method is $dst = (om D - U)^{-1} src$.

    +

    Apply transpose SOR preconditioning matrix to src. The result of this method is $dst = (om D - U)^{-1} src$.

    @@ -1447,7 +1447,7 @@
    -

    Add the matrix A conjugated by B, that is, $B A B^T$ to this object. If the parameter transpose is true, compute $B^T A B$.

    +

    Add the matrix A conjugated by B, that is, $B A B^T$ to this object. If the parameter transpose is true, compute $B^T A B$.

    This function requires that B has a const_iterator traversing all matrix entries and that A has a function el(i,j) for access to a specific entry.

    Definition at line 1461 of file sparse_matrix_ez.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 2024-03-17 21:57:37.419197092 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 2024-03-17 21:57:37.423197117 +0000 @@ -143,7 +143,7 @@

    The typical use for these iterators is to iterate over the elements of a sparse matrix or over the elements of individual rows. Note that there is no guarantee that the elements of a row are actually traversed in an order in which columns monotonically increase. See the documentation of the SparsityPattern class for more information.

    The first template argument denotes the underlying numeric type, the second the constness of the matrix.

    Since there is a specialization of this class for Constness=false, this class is for iterators to constant matrices.

    -
    Note
    This class operates directly on the internal data structures of the SparsityPattern and SparseMatrix classes. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index and the value of an entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ row). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparse matrix at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices and values whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.
    +
    Note
    This class operates directly on the internal data structures of the SparsityPattern and SparseMatrix classes. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index and the value of an entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ row). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparse matrix at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices and values whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.

    Definition at line 347 of file sparse_matrix.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 2024-03-17 21:57:37.475197438 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 2024-03-17 21:57:37.475197438 +0000 @@ -1161,7 +1161,7 @@
    -

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$, a diagonal matrix has bandwidth 0, and there are at most $2*q+1$ entries per row if the bandwidth is $q$. The returned quantity is sometimes called "half +

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$, a diagonal matrix has bandwidth 0, and there are at most $2*q+1$ entries per row if the bandwidth is $q$. The returned quantity is sometimes called "half bandwidth" in the literature.

    Definition at line 674 of file sparsity_pattern.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 2024-03-17 21:57:37.511197660 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 2024-03-17 21:57:37.515197685 +0000 @@ -160,7 +160,7 @@

    Detailed Description

    An iterator class for walking over the elements of a sparsity pattern.

    The typical use for these iterators is to iterate over the elements of a sparsity pattern (or, since they also serve as the basis for iterating over the elements of an associated matrix, over the elements of a sparse matrix), or over the elements of individual rows. There is no guarantee that the elements of a row are actually traversed in an order in which column numbers monotonically increase. See the documentation of the SparsityPattern class for more information.

    -
    Note
    This class operates directly on the internal data structures of the SparsityPattern class. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index of the sparsity pattern entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ rows). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparsity pattern at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.
    +
    Note
    This class operates directly on the internal data structures of the SparsityPattern class. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index of the sparsity pattern entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ rows). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparsity pattern at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.

    Definition at line 280 of file sparsity_pattern.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 2024-03-17 21:57:37.567198006 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 2024-03-17 21:57:37.571198031 +0000 @@ -219,20 +219,20 @@ class SphericalManifold< dim, spacedim >

    Manifold description for a spherical space coordinate system.

    You can use this Manifold object to describe any sphere, circle, hypersphere or hyperdisc in two or three dimensions. This manifold can be used as a co-dimension one manifold descriptor of a spherical surface embedded in a higher dimensional space, or as a co-dimension zero manifold descriptor for a body with positive volume, provided that the center of the spherical space is excluded from the domain. An example for the use of this function would be in the description of a hyper-shell or hyper-ball geometry, for example after creating a coarse mesh using GridGenerator::hyper_ball(). (However, it is worth mentioning that generating a good mesh for a disk or ball is complicated and requires additional steps. See the "Possibilities for extensions" section of step-6 for an extensive discussion of how one would construct such meshes and what one needs to do for it.)

    The two template arguments match the meaning of the two template arguments in Triangulation<dim, spacedim>, however this Manifold can be used to describe both thin and thick objects, and the behavior is identical when dim <= spacedim, i.e., the functionality of SphericalManifold<2,3> is identical to SphericalManifold<3,3>.

    -

    While PolarManifold reflects the usual notion of polar coordinates, it may not be suitable for domains that contain either the north or south poles. Consider for instance the pair of points $x_1=(1,\pi/3,0)$ and $x_2=(1,\pi/3,\pi)$ in polar coordinates (lying on the surface of a sphere with radius one, on a parallel at height $\pi/3$). In this case connecting the points with a straight line in polar coordinates would take the long road around the globe, without passing through the north pole.

    +

    While PolarManifold reflects the usual notion of polar coordinates, it may not be suitable for domains that contain either the north or south poles. Consider for instance the pair of points $x_1=(1,\pi/3,0)$ and $x_2=(1,\pi/3,\pi)$ in polar coordinates (lying on the surface of a sphere with radius one, on a parallel at height $\pi/3$). In this case connecting the points with a straight line in polar coordinates would take the long road around the globe, without passing through the north pole.

    These two points would be connected (using a PolarManifold) by the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   s: [0,1]  & \rightarrow &  \mathbb S^3 \\
           t & \mapsto     &  (1,\pi/3,0) + (0,0,t\pi)
-\end{align*} +\end{align*}" src="form_1449.png"/>

    This curve is not a geodesic on the sphere, and it is not how we would connect those two points. A better curve, would be the one passing through the North pole:

    -\[
+<picture><source srcset=\[
  s(t) = x_1 \cos(\alpha(t)) + \kappa \times x_1 \sin(\alpha(t)) +
  \kappa ( \kappa \cdot x_1) (1-\cos(\alpha(t))).
-\] +\]" src="form_1450.png"/>

    -

    where $\kappa = \frac{x_1 \times x_2}{\Vert x_1 \times x_2 \Vert}$ and $\alpha(t) = t \cdot \arccos(x_1 \cdot x_2)$ for $t\in[0,1]$. Indeed, this is a geodesic, and it is the natural choice when connecting points on the surface of the sphere. In the examples above, the PolarManifold class implements the first way of connecting two points on the surface of a sphere, while SphericalManifold implements the second way, i.e., this Manifold connects points using geodesics. If more than two points are involved through a SphericalManifold::get_new_points() call, a so-called spherical average is used where the final point minimizes the weighted distance to all other points via geodesics.

    +

    where $\kappa = \frac{x_1 \times x_2}{\Vert x_1 \times x_2 \Vert}$ and $\alpha(t) = t \cdot \arccos(x_1 \cdot x_2)$ for $t\in[0,1]$. Indeed, this is a geodesic, and it is the natural choice when connecting points on the surface of the sphere. In the examples above, the PolarManifold class implements the first way of connecting two points on the surface of a sphere, while SphericalManifold implements the second way, i.e., this Manifold connects points using geodesics. If more than two points are involved through a SphericalManifold::get_new_points() call, a so-called spherical average is used where the final point minimizes the weighted distance to all other points via geodesics.

    In particular, this class implements a Manifold that joins any two points in space by first projecting them onto the surface of a sphere with unit radius, then connecting them with a geodesic, and finally rescaling the final radius so that the resulting one is the weighted average of the starting radii. This Manifold is identical to PolarManifold in dimension two, while for dimension three it returns points that are more uniformly distributed on the sphere, and it is invariant with respect to rotations of the coordinate system, therefore avoiding the problems that PolarManifold has at the poles. Notice, in particular, that computing tangent vectors at the poles with a PolarManifold is not well defined, while it is perfectly fine with this class.

    For mathematical reasons, it is impossible to construct a unique map of a sphere using only geodesic curves, and therefore, using this class with MappingManifold is discouraged. If you use this Manifold to describe the geometry of a sphere, you should use MappingQ as the underlying mapping, and not MappingManifold.

    This Manifold can be used only on geometries where a ball with finite radius is removed from the center. Indeed, the center is a singular point for this manifold, and if you try to connect two points across the center, they would travel on spherical coordinates, avoiding the center.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 2024-03-17 21:57:37.663198599 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 2024-03-17 21:57:37.659198574 +0000 @@ -301,7 +301,7 @@ std::ostream &&#href_anchor"memTemplItemRight" valign="bottom">operator<< (std::ostream &out, const SymmetricTensor< 4, dim, Number > &t) &#href_anchor"details" id="details">

    Detailed Description

    template<int rank_, int dim, typename Number>
    -class SymmetricTensor< rank_, dim, Number >

    Provide a class that stores symmetric tensors of rank 2,4,... efficiently, i.e. only store those off-diagonal elements of the full tensor that are not redundant. For example, for symmetric $2\times 2$ tensors, this would be the elements 11, 22, and 12, while the element 21 is equal to the 12 element. Within this documentation, second order symmetric tensors are denoted as bold-faced upper-case Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek letters such as $\boldsymbol{\varepsilon}$, $\boldsymbol{\sigma}$. The Cartesian coordinates of a second-order tensor such as $\mathbf A$ are represented as $A_{ij}$ where $i,j$ are indices ranging from 0 to dim-1.

    +class SymmetricTensor< rank_, dim, Number >

    Provide a class that stores symmetric tensors of rank 2,4,... efficiently, i.e. only store those off-diagonal elements of the full tensor that are not redundant. For example, for symmetric $2\times 2$ tensors, this would be the elements 11, 22, and 12, while the element 21 is equal to the 12 element. Within this documentation, second order symmetric tensors are denoted as bold-faced upper-case Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek letters such as $\boldsymbol{\varepsilon}$, $\boldsymbol{\sigma}$. The Cartesian coordinates of a second-order tensor such as $\mathbf A$ are represented as $A_{ij}$ where $i,j$ are indices ranging from 0 to dim-1.

    Using this class for symmetric tensors of rank 2 has advantages over matrices in many cases since the dimension is known to the compiler as well as the location of the data. It is therefore possible to produce far more efficient code than for matrices with runtime-dependent dimension. It is also more efficient than using the more general Tensor class, since fewer elements are stored, and the class automatically makes sure that the tensor represents a symmetric object.

    For tensors of higher rank, the savings in storage are even higher. For example for the $3 \times 3 \times 3 \times 3$ tensors of rank 4, only 36 instead of the full 81 entries have to be stored. These rank 4 tensors are denoted by blackboard-style upper-case Latin letters such as $\mathbb A$ with components $\mathcal{A}_{ijkl}$.

    While the definition of a symmetric rank-2 tensor is obvious, tensors of rank 4 are considered symmetric if they are operators mapping symmetric rank-2 tensors onto symmetric rank-2 tensors. This so-called minor symmetry of the rank 4 tensor requires that for every set of four indices $i, j, k, l$, the identity $\mathcal{C}_{ijkl} = \mathcal{C}_{jikl} =
@@ -630,7 +630,7 @@
   </tr>
 </table>
 </div><div class= -

    This operator assigns a scalar to a tensor. To avoid confusion with what exactly it means to assign a scalar value to a tensor, zero is the only value allowed for d, allowing the intuitive notation $\mathbf A = 0$ to reset all elements of the tensor to zero.

    +

    This operator assigns a scalar to a tensor. To avoid confusion with what exactly it means to assign a scalar value to a tensor, zero is the only value allowed for d, allowing the intuitive notation $\mathbf A = 0$ to reset all elements of the tensor to zero.

    @@ -892,8 +892,8 @@
    -

    Double contraction product between the present symmetric tensor and a tensor of rank 2. For example, if the present object is the symmetric rank-2 tensor $\mathbf{A}$ and it is multiplied by another symmetric rank-2 tensor $\mathbf{B}$, then the result is the scalar-product double contraction $\mathbf A : \mathbf B = \sum_{i,j} A_{ij} B_{ij}$. In this case, the return value evaluates to a single scalar. While it is possible to define other scalar products (and associated induced norms), this one seems to be the most appropriate one.

    -

    If the present object is a rank-4 tensor such as $\mathbb A$, then the result is a rank-2 tensor $\mathbf C = \mathbb A : \mathbf B$, i.e., the operation contracts over the last two indices of the present object and the indices of the argument, and the result is a tensor of rank 2 ( $C_{ij} = \sum_{k,l} \mathcal{A}_{ijkl} B_{kl}$).

    +

    Double contraction product between the present symmetric tensor and a tensor of rank 2. For example, if the present object is the symmetric rank-2 tensor $\mathbf{A}$ and it is multiplied by another symmetric rank-2 tensor $\mathbf{B}$, then the result is the scalar-product double contraction $\mathbf A : \mathbf B = \sum_{i,j} A_{ij} B_{ij}$. In this case, the return value evaluates to a single scalar. While it is possible to define other scalar products (and associated induced norms), this one seems to be the most appropriate one.

    +

    If the present object is a rank-4 tensor such as $\mathbb A$, then the result is a rank-2 tensor $\mathbf C = \mathbb A : \mathbf B$, i.e., the operation contracts over the last two indices of the present object and the indices of the argument, and the result is a tensor of rank 2 ( $C_{ij} = \sum_{k,l} \mathcal{A}_{ijkl} B_{kl}$).

    Note that the multiplication operator for symmetric tensors is defined to be a double contraction over two indices, while it is defined as a single contraction over only one index for regular Tensor objects. For symmetric tensors it therefore acts in a way that is commonly denoted by a "colon multiplication" in the mathematical literature (the two dots of the colon suggesting that it is a contraction over two indices), which corresponds to a scalar product between tensors.

    It is worth pointing out that this definition of operator* between symmetric tensors is different to how the (in general non-symmetric) Tensor class defines operator*, namely as the single-contraction product over the last index of the first operand and the first index of the second operand. For the double contraction of Tensor objects, you will need to use the double_contract() function.

    To maintain at least a modicum of resemblance between the interfaces of Tensor and SymmetricTensor, there are also global functions double_contract() for symmetric tensors that then do the same work as this operator. However, rather than returning the result as a return value, they write it into the first argument to the function in the same way as the corresponding functions for the Tensor class do things.

    @@ -1237,7 +1237,7 @@
    -

    The opposite of the previous function: given an index $i$ in the unrolled form of the tensor, return what set of indices $(k,l)$ (for rank-2 tensors) or $(k,l,m,n)$ (for rank-4 tensors) corresponds to it.

    +

    The opposite of the previous function: given an index $i$ in the unrolled form of the tensor, return what set of indices $(k,l)$ (for rank-2 tensors) or $(k,l,m,n)$ (for rank-4 tensors) corresponds to it.

    @@ -1457,7 +1457,7 @@
    -

    Return the fourth-order symmetric identity tensor $\mathbb S$ which maps symmetric second-order tensors, such as $\mathbf A$, to themselves.

    +

    Return the fourth-order symmetric identity tensor $\mathbb S$ which maps symmetric second-order tensors, such as $\mathbf A$, to themselves.

    \[
   \mathbb S : \mathbf A = \mathbf A
 \] @@ -1857,7 +1857,7 @@

    -

    Compute the second invariant of a tensor of rank 2. The second invariant of a tensor $\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12
+<p>Compute the second invariant of a tensor of rank 2. The second invariant of a tensor <picture><source srcset=$\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12
 \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$.

    For the kind of arguments to this function, i.e., a rank-2 tensor of size 1, the result is simply zero.

    @@ -1889,11 +1889,11 @@
    -

    Compute the second invariant of a tensor of rank 2. The second invariant of a tensor $\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12
+<p>Compute the second invariant of a tensor of rank 2. The second invariant of a tensor <picture><source srcset=$\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12
 \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$.

    For the kind of arguments to this function, i.e., a symmetric rank-2 tensor of size 2, the result is (counting indices starting at one) $I_2(\mathbf A) = II(\mathbf A) = \frac 12
   \left[ (A_{11} + A_{22})^2 - (A_{11}^2+2 A_{12}^2+ A_{22}^2) \right]
-  = A_{11} A_{22} - A_{12}^2$. As expected, for the $2\times 2$ symmetric tensors this function handles, this equals the determinant of the tensor. (This is so because for $2\times 2$ symmetric tensors, there really are only two invariants, so the second and third invariant are the same; the determinant is the third invariant.)

    + = A_{11} A_{22} - A_{12}^2$" src="form_810.png"/>. As expected, for the $2\times 2$ symmetric tensors this function handles, this equals the determinant of the tensor. (This is so because for $2\times 2$ symmetric tensors, there really are only two invariants, so the second and third invariant are the same; the determinant is the third invariant.)

    Definition at line 2917 of file symmetric_tensor.h.

    @@ -1923,7 +1923,7 @@
    -

    Compute the second invariant of a tensor of rank 2. The second invariant of a tensor $\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12
+<p>Compute the second invariant of a tensor of rank 2. The second invariant of a tensor <picture><source srcset=$\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12
 \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$.

    Definition at line 2934 of file symmetric_tensor.h.

    @@ -1982,8 +1982,8 @@
    -

    Return the eigenvalues of a symmetric $2\times 2$ tensor. The array of eigenvalues is sorted in descending order.

    -

    For $2\times 2$ tensors, the eigenvalues of tensor $\mathbf T$ are the roots of the characteristic polynomial $0 = \lambda^2
+<p>Return the eigenvalues of a symmetric <picture><source srcset=$2\times 2$ tensor. The array of eigenvalues is sorted in descending order.

    +

    For $2\times 2$ tensors, the eigenvalues of tensor $\mathbf T$ are the roots of the characteristic polynomial $0 = \lambda^2
 - \lambda\;\text{tr}\mathbf{T} + \det \mathbf{T}$ as given by $\lambda_1, \lambda_2 = \frac{1}{2} \left[ \text{tr} \mathbf{T} \pm
 \sqrt{(\text{tr} \mathbf{T})^2 - 4 \det \mathbf{T}} \right]$.

    Warning
    The algorithm employed here determines the eigenvalues by computing the roots of the characteristic polynomial. In the case that there exists a common root (the eigenvalues are equal), the computation is subject to round-off errors of order $\sqrt{\epsilon}$. As an alternative, the eigenvectors() function provides a more robust, but costly, method to compute the eigenvalues of a symmetric tensor.
    @@ -3051,7 +3051,7 @@ Initial value:

    An integer denoting the number of independent components that fully describe a symmetric tensor. In $d$ space dimensions, this number equals $\frac 12 (d^2+d)$ for symmetric tensors of rank 2.

    +

    An integer denoting the number of independent components that fully describe a symmetric tensor. In $d$ space dimensions, this number equals $\frac 12 (d^2+d)$ for symmetric tensors of rank 2.

    Definition at line 743 of file symmetric_tensor.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 2024-03-17 21:57:37.711198895 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 2024-03-17 21:57:37.719198945 +0000 @@ -232,7 +232,7 @@

    In some way, this class is similar to the Tensor class, in that it templatizes on the number of dimensions. However, there are two major differences. The first is that the Tensor class stores only numeric values (as doubles), while the Table class stores arbitrary objects. The second is that the Tensor class has fixed sizes in each dimension, also given as a template argument, while this class can handle arbitrary and different sizes in each dimension.

    This has two consequences. First, since the size is not known at compile time, it has to do explicit memory allocation. Second, the layout of individual elements is not known at compile time, so access is slower than for the Tensor class where the number of elements and their location are known at compile time and the compiler can optimize with this knowledge (for example when unrolling loops). On the other hand, this class is of course more flexible, for example when you want a two-dimensional table with the number of rows equal to the number of degrees of freedom on a cell, and the number of columns equal to the number of quadrature points. Both numbers may only be known at run-time, so a flexible table is needed here. Furthermore, you may want to store, say, the gradients of shape functions, so the data type is not a single scalar value, but a tensor itself.

    Dealing with large data sets

    -

    The Table classes (derived from this class) are frequently used to store large data tables. A modest example is given in step-53 where we store a $380 \times 220$ table of geographic elevation data for a region of Africa, and this data requires about 670 kB of memory; however, tables that store three- or more-dimensional data (say, information about the density, pressure, and temperature in the earth interior on a regular grid of (latitude, longitude, depth) points) can easily run into hundreds of megabytes or more. These tables are then often provided to classes such as InterpolatedTensorProductGridData or InterpolatedUniformGridData.

    +

    The Table classes (derived from this class) are frequently used to store large data tables. A modest example is given in step-53 where we store a $380 \times 220$ table of geographic elevation data for a region of Africa, and this data requires about 670 kB of memory; however, tables that store three- or more-dimensional data (say, information about the density, pressure, and temperature in the earth interior on a regular grid of (latitude, longitude, depth) points) can easily run into hundreds of megabytes or more. These tables are then often provided to classes such as InterpolatedTensorProductGridData or InterpolatedUniformGridData.

    If you need to load such tables on single-processor (or multi-threaded) jobs, then there is nothing you can do about the size of these tables: The table just has to fit into memory. But, if your program is parallelized via MPI, then a typical first implementation would create a table object on every process and fill it on every MPI process by reading the data from a file. This is inefficient from two perspectives:

    • You will have a lot of processes that are all trying to read from the same file at the same time.
    • In most cases, the data stored on every process is the same, and while every process needs to be able to read from a table, it is not necessary that every process stores its own table: All MPI processes that happen to be located on the same machine might as well store only one copy and make it available to each other via shared memory; in this model, only one MPI process per machine needs to store the data, and all other processes could then access it.
    • /usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html 2024-03-17 21:57:37.755199167 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html 2024-03-17 21:57:37.759199192 +0000 @@ -200,7 +200,7 @@

      Two (or more) columns may be merged into a "supercolumn" by twice (or multiple) calling add_column_to_supercolumn(), see there. Additionally there is a function to set for each column the precision of the output of numbers, and there are several functions to prescribe the format and the captions the columns are written with in tex mode.

      A detailed explanation of this class is also given in the step-13 tutorial program.

      Example

      -

      This is a simple example demonstrating the usage of this class. The first column includes the numbers $i=1 \dots n$, the second $1^2 \dots n^2$, the third $\sqrt{1}\dots\sqrt{n}$, where the second and third columns are merged into one supercolumn with the superkey squares and roots. Additionally the first column is aligned to the right (the default was centered) and the precision of the square roots are set to be 6 (instead of 4 as default).

      +

      This is a simple example demonstrating the usage of this class. The first column includes the numbers $i=1 \dots n$, the second $1^2 \dots n^2$, the third $\sqrt{1}\dots\sqrt{n}$, where the second and third columns are merged into one supercolumn with the superkey squares and roots. Additionally the first column is aligned to the right (the default was centered) and the precision of the square roots are set to be 6 (instead of 4 as default).

      for (unsigned int i = 1; i <= n; ++i)
      {
      @@ -231,9 +231,9 @@

      When generating output, TableHandler expects that all columns have the exact same number of elements in it so that the result is in fact a table. This assumes that in each of the iterations (time steps, nonlinear iterations, etc) you fill every single column. On the other hand, this may not always be what you want to do. For example, it could be that the function that computes the nonlinear residual is only called every few time steps; or, a function computing statistics of the mesh is only called whenever the mesh is in fact refined. In these cases, the add_value() function will be called less often for some columns and the column would therefore have fewer elements; furthermore, these elements would not be aligned with the rows that contain the other data elements that were produced during this iteration. An entirely different scenario is that the table is filled and at a later time we use the data in there to compute the elements of other rows; the ConvergenceTable class does something like this.

      To support both scenarios, the TableHandler class has a property called auto-fill mode. By default, auto-fill mode is off, but it can be enabled by calling set_auto_fill_mode(). If auto-fill mode is enabled we use the following algorithm:

        -
      • When calling add_value(key, value), we count the number of elements in the column corresponding to key. Let's call this number $m$.
      • -
      • We also determine the maximal number of elements in the other columns; call it $n$.
      • -
      • If $m < n-1$ then we add $n-m-1$ copies of the object T() to this column. Here, T is the data type of the given value. For example, if T is a numeric type, then T() is the number zero; if T is std::string, then T() is the empty string "".
      • +
      • When calling add_value(key, value), we count the number of elements in the column corresponding to key. Let's call this number $m$.
      • +
      • We also determine the maximal number of elements in the other columns; call it $n$.
      • +
      • If $m < n-1$ then we add $n-m-1$ copies of the object T() to this column. Here, T is the data type of the given value. For example, if T is a numeric type, then T() is the number zero; if T is std::string, then T() is the empty string "".
      • Add the given value to this column.

      Padding the column with default elements makes sure that after the addition the column has as many entries as the longest other column. In other words, if we have skipped previous invocations of add_value() for a given key, then the padding will enter default values into this column.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html 2024-03-17 21:57:37.815199538 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html 2024-03-17 21:57:37.823199588 +0000 @@ -268,13 +268,13 @@ class Tensor< rank_, dim, Number >

      A general tensor class with an arbitrary rank, i.e. with an arbitrary number of indices. The Tensor class provides an indexing operator and a bit of infrastructure, but most functionality is recursively handed down to tensors of rank 1 or put into external templated functions, e.g. the contract family.

      The rank of a tensor specifies which types of physical quantities it can represent:

      • -A rank-0 tensor is a scalar that can store quantities such as temperature or pressure. These scalar quantities are shown in this documentation as simple lower-case Latin letters e.g. $a, b, c, \dots$.
      • +A rank-0 tensor is a scalar that can store quantities such as temperature or pressure. These scalar quantities are shown in this documentation as simple lower-case Latin letters e.g. $a, b, c, \dots$.
      • -A rank-1 tensor is a vector with dim components and it can represent vector quantities such as velocity, displacement, electric field, etc. They can also describe the gradient of a scalar field. The notation used for rank-1 tensors is bold-faced lower-case Latin letters e.g. $\mathbf a, \mathbf b, \mathbf c, \dots$. The components of a rank-1 tensor such as $\mathbf a$ are represented as $a_i$ where $i$ is an index between 0 and dim-1.
      • +A rank-1 tensor is a vector with dim components and it can represent vector quantities such as velocity, displacement, electric field, etc. They can also describe the gradient of a scalar field. The notation used for rank-1 tensors is bold-faced lower-case Latin letters e.g. $\mathbf a, \mathbf b, \mathbf c, \dots$. The components of a rank-1 tensor such as $\mathbf a$ are represented as $a_i$ where $i$ is an index between 0 and dim-1.
      • -A rank-2 tensor is a linear operator that can transform a vector into another vector. These tensors are similar to matrices with $\text{dim} \times \text{dim}$ components. There is a related class SymmetricTensor<2,dim> for tensors of rank 2 whose elements are symmetric. Rank-2 tensors are usually denoted by bold-faced upper-case Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek letters for example $\boldsymbol{\varepsilon}, \boldsymbol{\sigma}$. The components of a rank 2 tensor such as $\mathbf A$ are shown with two indices $(i,j)$ as $A_{ij}$. These tensors usually describe the gradients of vector fields (deformation gradient, velocity gradient, etc.) or Hessians of scalar fields. Additionally, mechanical stress tensors are rank-2 tensors that map the unit normal vectors of internal surfaces into local traction (force per unit area) vectors.
      • +A rank-2 tensor is a linear operator that can transform a vector into another vector. These tensors are similar to matrices with $\text{dim} \times \text{dim}$ components. There is a related class SymmetricTensor<2,dim> for tensors of rank 2 whose elements are symmetric. Rank-2 tensors are usually denoted by bold-faced upper-case Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek letters for example $\boldsymbol{\varepsilon}, \boldsymbol{\sigma}$. The components of a rank 2 tensor such as $\mathbf A$ are shown with two indices $(i,j)$ as $A_{ij}$. These tensors usually describe the gradients of vector fields (deformation gradient, velocity gradient, etc.) or Hessians of scalar fields. Additionally, mechanical stress tensors are rank-2 tensors that map the unit normal vectors of internal surfaces into local traction (force per unit area) vectors.
      • -Tensors with ranks higher than 2 are similarly defined in a consistent manner. They have $\text{dim}^{\text{rank}}$ components and the number of indices required to identify a component equals rank. For rank-4 tensors, a symmetric variant called SymmetricTensor<4,dim> exists.
      • +Tensors with ranks higher than 2 are similarly defined in a consistent manner. They have $\text{dim}^{\text{rank}}$ components and the number of indices required to identify a component equals rank. For rank-4 tensors, a symmetric variant called SymmetricTensor<4,dim> exists.

      Using this tensor class for objects of rank 2 has advantages over matrices in many cases since the dimension is known to the compiler as well as the location of the data. It is therefore possible to produce far more efficient code than for matrices with runtime-dependent dimension. It also makes the code easier to read because of the semantic difference between a tensor (an object that relates to a coordinate system and has transformation properties with regard to coordinate rotations and transforms) and matrices (which we consider as operators on arbitrary vector spaces related to linear algebra things).

      Template Parameters
      @@ -1209,7 +1209,7 @@
      -

      Return an unrolled index in the range $[0,\text{dim}^{\text{rank}}-1]$ for the element of the tensor indexed by the argument to the function.

      +

      Return an unrolled index in the range $[0,\text{dim}^{\text{rank}}-1]$ for the element of the tensor indexed by the argument to the function.

      @@ -1237,7 +1237,7 @@
      -

      Opposite of component_to_unrolled_index: For an index in the range $[0, \text{dim}^{\text{rank}}-1]$, return which set of indices it would correspond to.

      +

      Opposite of component_to_unrolled_index: For an index in the range $[0, \text{dim}^{\text{rank}}-1]$, return which set of indices it would correspond to.

      @@ -1903,11 +1903,11 @@

      Entrywise multiplication of two tensor objects of general rank.

      This multiplication is also called "Hadamard-product" (c.f. https://en.wikipedia.org/wiki/Hadamard_product_(matrices)), and generates a new tensor of size <rank, dim>:

      -\[
+<picture><source srcset=\[
   \text{result}_{i, j}
   = \text{left}_{i, j}\circ
     \text{right}_{i, j}
-\] +\]" src="form_857.png"/>

      Template Parameters
      @@ -1956,7 +1956,7 @@

      Note
      For the Tensor class, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, for which the corresponding operator*() performs a double contraction. The origin of the difference in how operator*() is implemented between Tensor and SymmetricTensor is that for the former, the product between two Tensor objects of same rank and dimension results in another Tensor object – that it, operator*() corresponds to the multiplicative group action within the group of tensors. On the other hand, there is no corresponding multiplicative group action with the set of symmetric tensors because, in general, the product of two symmetric tensors is a nonsymmetric tensor. As a consequence, for a mathematician, it is clear that operator*() for symmetric tensors must have a different meaning: namely the dot or scalar product that maps two symmetric tensors of rank 2 to a scalar. This corresponds to the double-dot (colon) operator whose meaning is then extended to the product of any two even-ranked symmetric tensors.
      -In case the contraction yields a tensor of rank 0, that is, if rank_1==rank_2==1, then a scalar number is returned as an unwrapped number type. Return the $l_1$ norm of the given rank-2 tensor, where $\|\mathbf T\|_1 = \max_j \sum_i |T_{ij}|$ (maximum of the sums over columns).
      +In case the contraction yields a tensor of rank 0, that is, if rank_1==rank_2==1, then a scalar number is returned as an unwrapped number type. Return the $l_1$ norm of the given rank-2 tensor, where $\|\mathbf T\|_1 = \max_j \sum_i |T_{ij}|$ (maximum of the sums over columns).

      Definition at line 3035 of file tensor.h.

      @@ -1986,7 +1986,7 @@
      -

      Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

      +

      Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

      Definition at line 3061 of file tensor.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 2024-03-17 21:57:37.871199884 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 2024-03-17 21:57:37.879199933 +0000 @@ -233,7 +233,7 @@

      Detailed Description

      template<int dim, int dim_A, int spacedim_A, int chartdim_A, int dim_B, int spacedim_B, int chartdim_B>
      class TensorProductManifold< dim, dim_A, spacedim_A, chartdim_A, dim_B, spacedim_B, chartdim_B >

      Tensor product manifold of two ChartManifolds.

      -

      This manifold will combine the ChartManifolds A and B given in the constructor to form a new ChartManifold by building the tensor product $A\otimes B$. The first spacedim_A dimensions in the real space and the first chartdim_A dimensions of the chart will be given by manifold A, while the remaining coordinates are given by B. The manifold is to be used by a Triangulation<dim, space_dim_A+space_dim_B>.

      +

      This manifold will combine the ChartManifolds A and B given in the constructor to form a new ChartManifold by building the tensor product $A\otimes B$. The first spacedim_A dimensions in the real space and the first chartdim_A dimensions of the chart will be given by manifold A, while the remaining coordinates are given by B. The manifold is to be used by a Triangulation<dim, space_dim_A+space_dim_B>.

      An example usage would be the combination of a SphericalManifold with space dimension 2 and a FlatManifold with space dimension 1 to form a cylindrical manifold.

      pull_back(), push_forward(), and push_forward_gradient() are implemented by splitting the input argument into inputs for A and B according to the given dimensions and applying the corresponding operations before concatenating the result.

      Note
      The dimension arguments dim_A and dim_B are not used.
      @@ -605,24 +605,24 @@
      -

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      -

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

      -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      +

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

      +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1440.png"/>

      In image space, i.e., in the space in which we operate, this leads to the curve

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1441.png"/>

      -

      What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

      -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

      +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -631,11 +631,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1442.png"/>

      This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

      -

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      +

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 2024-03-17 21:57:37.911200131 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 2024-03-17 21:57:37.911200131 +0000 @@ -161,9 +161,9 @@ M_1 \otimes A_0 \end{align*}" src="form_1936.png"/>

      -

      in 3d. The typical application setting is a discretization of the Laplacian $L$ on a Cartesian (axis-aligned) geometry, where it can be exactly represented by the Kronecker or tensor product of a 1d mass matrix $M$ and a 1d Laplace matrix $A$ in each tensor direction (due to symmetry $M$ and $A$ are the same in each dimension). The dimension of the resulting class is the product of the one-dimensional matrices.

      -

      This class implements two basic operations, namely the usual multiplication by a vector and the inverse. For both operations, fast tensorial techniques can be applied that implement the operator evaluation in $\text{size}(M)^{d+1}$ arithmetic operations, considerably less than $\text{size}(M)^{2d}$ for the naive forward transformation and $\text{size}(M)^{3d}$ for setting up the inverse of $L$.

      -

      Interestingly, the exact inverse of the matrix $L$ can be found through tensor products due to 1964's work by Lynch et al. [Lynch1964],

      +

      in 3d. The typical application setting is a discretization of the Laplacian $L$ on a Cartesian (axis-aligned) geometry, where it can be exactly represented by the Kronecker or tensor product of a 1d mass matrix $M$ and a 1d Laplace matrix $A$ in each tensor direction (due to symmetry $M$ and $A$ are the same in each dimension). The dimension of the resulting class is the product of the one-dimensional matrices.

      +

      This class implements two basic operations, namely the usual multiplication by a vector and the inverse. For both operations, fast tensorial techniques can be applied that implement the operator evaluation in $\text{size}(M)^{d+1}$ arithmetic operations, considerably less than $\text{size}(M)^{2d}$ for the naive forward transformation and $\text{size}(M)^{3d}$ for setting up the inverse of $L$.

      +

      Interestingly, the exact inverse of the matrix $L$ can be found through tensor products due to 1964's work by Lynch et al. [Lynch1964],

      \begin{align*}
 L^{-1} &= S_1 \otimes S_0 (\Lambda_1 \otimes I + I \otimes \Lambda_0)^{-1}
 S_1^\mathrm T \otimes S_0^\mathrm T,
@@ -174,7 +174,7 @@
 A_d s  &= \lambda M_d s, d = 0, \quad \ldots,\mathrm{dim},
 \end{align*}

      -

      and $\Lambda_d$ is the diagonal matrix representing the generalized eigenvalues $\lambda$. Note that the vectors $s$ are such that they simultaneously diagonalize $A_d$ and $M_d$, i.e. $S_d^{\mathrm T} A_d S_d =
+<p> and <picture><source srcset=$\Lambda_d$ is the diagonal matrix representing the generalized eigenvalues $\lambda$. Note that the vectors $s$ are such that they simultaneously diagonalize $A_d$ and $M_d$, i.e. $S_d^{\mathrm T} A_d S_d =
 \Lambda_d$ and $S_d^{\mathrm T} M_d S_d = I$. This method of matrix inversion is called fast diagonalization method.

      This class requires LAPACK support.

      Note
      This class allows for two modes of usage. The first is a use case with run time constants for the matrix dimensions that is achieved by setting the optional template parameter n_rows_1d to -1. The second mode of usage that is faster allows to set the template parameter as a compile time constant, giving significantly faster code in particular for small sizes of the matrix.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductPolynomialsBubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductPolynomialsBubbles.html 2024-03-17 21:57:37.951200378 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductPolynomialsBubbles.html 2024-03-17 21:57:37.951200378 +0000 @@ -157,9 +157,9 @@
      x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

      Detailed Description

      template<int dim>
      -class TensorProductPolynomialsBubbles< dim >

      A class that represents a space of tensor product polynomials, augmented by $dim$ (non-normalized) bubble functions of form $\varphi_j(\mathbf x)
+class TensorProductPolynomialsBubbles< dim ></div><p>A class that represents a space of tensor product polynomials, augmented by <picture><source srcset=$dim$ (non-normalized) bubble functions of form $\varphi_j(\mathbf x)
 = 2^{\text{degree}-1}\left(x_j-frac 12\right)^{\text{degree}-1}
-\left[\prod_{i=0}^{dim-1}(x_i(1-x_i))\right]$ for $j=0,\ldots,dim-1$. If degree is one, then the first factor disappears and one receives the usual bubble function centered at the mid-point of the cell.

      +\left[\prod_{i=0}^{dim-1}(x_i(1-x_i))\right]$" src="form_877.png"/> for $j=0,\ldots,dim-1$. If degree is one, then the first factor disappears and one receives the usual bubble function centered at the mid-point of the cell.

      This class inherits most of its functionality from TensorProductPolynomials. The bubble enrichments are added for the last index.

      Definition at line 53 of file tensor_product_polynomials_bubbles.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html 2024-03-17 21:57:38.007200724 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html 2024-03-17 21:57:38.011200748 +0000 @@ -232,7 +232,7 @@

      Detailed Description

      template<int dim>
      -class TorusManifold< dim >

      Manifold description for the surface of a Torus in three dimensions. The Torus is assumed to be in the x-z plane. The reference coordinate system is given by the angle $phi$ around the y axis, the angle $theta$ around the centerline of the torus, and the distance to the centerline $w$ (between 0 and 1).

      +class TorusManifold< dim >

      Manifold description for the surface of a Torus in three dimensions. The Torus is assumed to be in the x-z plane. The reference coordinate system is given by the angle $phi$ around the y axis, the angle $theta$ around the centerline of the torus, and the distance to the centerline $w$ (between 0 and 1).

      This class was developed to be used in conjunction with GridGenerator::torus.

      Definition at line 787 of file manifold_lib.h.

      @@ -638,7 +638,7 @@
      -

      Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      +

      Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

      Refer to the general documentation of this class for more information.

      @@ -668,24 +668,24 @@
      -

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      -

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

      -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      +

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

      +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1440.png"/>

      In image space, i.e., in the space in which we operate, this leads to the curve

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1441.png"/>

      -

      What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

      -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

      +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -694,11 +694,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1442.png"/>

      This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

      -

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      +

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 2024-03-17 21:57:38.067201095 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 2024-03-17 21:57:38.075201144 +0000 @@ -220,16 +220,16 @@

      Detailed Description

      template<int dim, int spacedim = dim>
      class TransfiniteInterpolationManifold< dim, spacedim >

      A mapping class that extends curved boundary descriptions into the interior of the computational domain. The outer curved boundary description is assumed to be given by another manifold (e.g. a polar manifold on a circle). The mechanism to extend the boundary information is a so-called transfinite interpolation. The use of this class is discussed extensively in step-65.

      -

      The formula for extending such a description in 2d is, for example, described on Wikipedia. Given a point $(u,v)$ on the chart, the image of this point in real space is given by

      -\begin{align*}
+<p>The formula for extending such a description in 2d is, for example, described on <a href=Wikipedia. Given a point $(u,v)$ on the chart, the image of this point in real space is given by

      +\begin{align*}
 \mathbf S(u,v) &= (1-v)\mathbf c_0(u)+v \mathbf c_1(u) + (1-u)\mathbf c_2(v)
 + u \mathbf c_3(v) \\
 &\quad - \left[(1-u)(1-v) \mathbf x_0 + u(1-v) \mathbf x_1 + (1-u)v \mathbf
 x_2 + uv \mathbf x_3 \right]
-\end{align*} +\end{align*}" src="form_1461.png"/>

      -

      where $\bf x_0, \bf x_1, \bf x_2, \bf x_3$ denote the four bounding vertices bounding the image space and $\bf c_0, \bf c_1, \bf c_2, \bf c_3$ are the four curves describing the lines of the cell. If a curved manifold is attached to any of these lines, the evaluation is done according to Manifold::get_new_point() with the two end points of the line and appropriate weight. In 3d, the generalization of this formula is implemented, creating a weighted sum of the vertices (positive contribution), the lines (negative), and the faces (positive contribution).

      -

      This manifold is usually attached to a coarse mesh and then places new points as a combination of the descriptions on the boundaries, weighted appropriately according to the position of the point in the original chart coordinates $(u,v)$. This manifold should be preferred over setting only a curved manifold on the boundary of a mesh in most situations as it yields more uniform mesh distributions as the mesh is refined because it switches from a curved description to a straight description over all children of the initial coarse cell this manifold was attached to. This way, the curved nature of the manifold that is originally contained in one coarse mesh layer will be applied to more than one fine mesh layer once the mesh gets refined. Note that the mechanisms of TransfiniteInterpolationManifold are also built into the MappingQ class when only a surface of a cell is subject to a curved description, ensuring that even the default case without this manifold gets optimal convergence rates when applying curved boundary descriptions.

      +

      where $\bf x_0, \bf x_1, \bf x_2, \bf x_3$ denote the four bounding vertices bounding the image space and $\bf c_0, \bf c_1, \bf c_2, \bf c_3$ are the four curves describing the lines of the cell. If a curved manifold is attached to any of these lines, the evaluation is done according to Manifold::get_new_point() with the two end points of the line and appropriate weight. In 3d, the generalization of this formula is implemented, creating a weighted sum of the vertices (positive contribution), the lines (negative), and the faces (positive contribution).

      +

      This manifold is usually attached to a coarse mesh and then places new points as a combination of the descriptions on the boundaries, weighted appropriately according to the position of the point in the original chart coordinates $(u,v)$. This manifold should be preferred over setting only a curved manifold on the boundary of a mesh in most situations as it yields more uniform mesh distributions as the mesh is refined because it switches from a curved description to a straight description over all children of the initial coarse cell this manifold was attached to. This way, the curved nature of the manifold that is originally contained in one coarse mesh layer will be applied to more than one fine mesh layer once the mesh gets refined. Note that the mechanisms of TransfiniteInterpolationManifold are also built into the MappingQ class when only a surface of a cell is subject to a curved description, ensuring that even the default case without this manifold gets optimal convergence rates when applying curved boundary descriptions.

      If no curved boundaries surround a coarse cell, this class reduces to a flat manifold description.

      To give an example of using this class, the following code attaches a transfinite manifold to a circle:

      PolarManifold<dim> polar_manifold;
      @@ -1142,11 +1142,11 @@
      x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
      -

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf
-x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), then the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

      -

      While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

      -

      The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf
-s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.

      +

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf
+x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), then the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

      +

      While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

      +

      The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf
+s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html 2024-03-17 21:57:38.155201639 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html 2024-03-17 21:57:38.163201687 +0000 @@ -327,7 +327,7 @@
      x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

      Detailed Description

      template<int structdim, int dim, int spacedim>
      -class TriaAccessor< structdim, dim, spacedim >

      A class that provides access to objects in a triangulation such as its vertices, sub-objects, children, geometric information, etc. This class represents objects of dimension structdim (i.e. 1 for lines, 2 for quads, 3 for hexes) in a triangulation of dimensionality dim (i.e. 1 for a triangulation of lines, 2 for a triangulation of quads, and 3 for a triangulation of hexes) that is embedded in a space of dimensionality spacedim (for spacedim==dim the triangulation represents a domain in $R^{dim}$, for spacedim>dim the triangulation is of a manifold embedded in a higher dimensional space).

      +class TriaAccessor< structdim, dim, spacedim >

      A class that provides access to objects in a triangulation such as its vertices, sub-objects, children, geometric information, etc. This class represents objects of dimension structdim (i.e. 1 for lines, 2 for quads, 3 for hexes) in a triangulation of dimensionality dim (i.e. 1 for a triangulation of lines, 2 for a triangulation of quads, and 3 for a triangulation of hexes) that is embedded in a space of dimensionality spacedim (for spacedim==dim the triangulation represents a domain in $R^{dim}$, for spacedim>dim the triangulation is of a manifold embedded in a higher dimensional space).

      There is a specialization of this class for the case where structdim equals zero, i.e., for vertices of a triangulation.

      Definition at line 757 of file tria_accessor.h.

      @@ -1716,7 +1716,7 @@

      This function computes a fast approximate transformation from the real to the unit cell by inversion of an affine approximation of the $d$-linear function from the reference $d$-dimensional cell.

      -

      The affine approximation of the unit to real cell mapping is found by a least squares fit of an affine function to the $2^d$ vertices of the present object. For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping. Thus, this function will return a finite result for all given input points, even in cases where the actual transformation by an actual bi-/trilinear or higher order mapping might be singular. Besides only approximating the mapping from the vertex points, this function also ignores the attached manifold descriptions. The result is only exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

      +

      The affine approximation of the unit to real cell mapping is found by a least squares fit of an affine function to the $2^d$ vertices of the present object. For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping. Thus, this function will return a finite result for all given input points, even in cases where the actual transformation by an actual bi-/trilinear or higher order mapping might be singular. Besides only approximating the mapping from the vertex points, this function also ignores the attached manifold descriptions. The result is only exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

      For exact transformations to the unit cell, use Mapping::transform_real_to_unit_cell().

      Note
      If dim<spacedim we first project p onto the plane.
      @@ -1764,15 +1764,15 @@
      -

      Return the barycenter (also called centroid) of the object. The barycenter for an object $K$ of dimension $d$ in $D$ space dimensions is given by the $D$-dimensional vector $\mathbf x_K$ defined by

      -\[
+<p>Return the barycenter (also called centroid) of the object. The barycenter for an object <picture><source srcset=$K$ of dimension $d$ in $D$ space dimensions is given by the $D$-dimensional vector $\mathbf x_K$ defined by

      +\[
   \mathbf x_K = \frac{1}{|K|} \int_K \mathbf x \; \textrm{d}x
-\] +\]" src="form_1482.png"/>

      where the measure of the object is given by

      -\[
+<picture><source srcset=\[
   |K| = \int_K \mathbf 1 \; \textrm{d}x.
-\] +\]" src="form_1483.png"/>

      This function assumes that $K$ is mapped by a $d$-linear function from the reference $d$-dimensional cell. Then the integrals above can be pulled back to the reference cell and evaluated exactly (if through lengthy and, compared to the center() function, expensive computations).

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html 2024-03-17 21:57:38.223202058 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html 2024-03-17 21:57:38.227202083 +0000 @@ -280,7 +280,7 @@ &#href_anchor"memitem:a34cceffc302e3c23552635478b9fc983" id="r_a34cceffc302e3c23552635478b9fc983">static unsigned int&#href_anchor"memItemRight" valign="bottom">quad_index (const unsigned int i) &#href_anchor"details" id="details">

      Detailed Description

      template<int spacedim>
      -class TriaAccessor< 0, 1, spacedim >

      This class is a specialization of TriaAccessor<structdim, dim, spacedim> for the case that structdim is zero and dim is one. This class represents vertices in a one-dimensional triangulation that is embedded in a space of dimensionality spacedim (for spacedim==dim==1 the triangulation represents a domain in ${\mathbb R}^\text{dim}$, for spacedim>dim==1 the triangulation is of a manifold embedded in a higher dimensional space).

      +class TriaAccessor< 0, 1, spacedim >

      This class is a specialization of TriaAccessor<structdim, dim, spacedim> for the case that structdim is zero and dim is one. This class represents vertices in a one-dimensional triangulation that is embedded in a space of dimensionality spacedim (for spacedim==dim==1 the triangulation represents a domain in ${\mathbb R}^\text{dim}$, for spacedim>dim==1 the triangulation is of a manifold embedded in a higher dimensional space).

      The current specialization of the TriaAccessor<0,dim,spacedim> class for vertices of a one-dimensional triangulation exists since in the dim == 1 case vertices are also faces.

      Definition at line 2328 of file tria_accessor.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 2024-03-17 21:57:38.275202380 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 2024-03-17 21:57:38.279202404 +0000 @@ -231,7 +231,7 @@ &#href_anchor"memitem:abda88195917e4d56f80eab016f21bde3" id="r_abda88195917e4d56f80eab016f21bde3">static unsigned int&#href_anchor"memItemRight" valign="bottom">quad_index (const unsigned int i) &#href_anchor"details" id="details">

      Detailed Description

      template<int dim, int spacedim>
      -class TriaAccessor< 0, dim, spacedim >

      This class is a specialization of TriaAccessor<structdim, dim, spacedim> for the case that structdim is zero. This class represents vertices in a triangulation of dimensionality dim (i.e. 1 for a triangulation of lines, 2 for a triangulation of quads, and 3 for a triangulation of hexes) that is embedded in a space of dimensionality spacedim (for spacedim==dim the triangulation represents a domain in ${\mathbb R}^\text{dim}$, for spacedim>dim the triangulation is of a manifold embedded in a higher dimensional space).

      +class TriaAccessor< 0, dim, spacedim >

      This class is a specialization of TriaAccessor<structdim, dim, spacedim> for the case that structdim is zero. This class represents vertices in a triangulation of dimensionality dim (i.e. 1 for a triangulation of lines, 2 for a triangulation of quads, and 3 for a triangulation of hexes) that is embedded in a space of dimensionality spacedim (for spacedim==dim the triangulation represents a domain in ${\mathbb R}^\text{dim}$, for spacedim>dim the triangulation is of a manifold embedded in a higher dimensional space).

      There is a further specialization of this class for the case that dim equals one, i.e., for vertices of a one-dimensional triangulation, since in that case vertices are also faces.

      Definition at line 1910 of file tria_accessor.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 2024-03-17 21:57:38.427203318 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 2024-03-17 21:57:38.431203342 +0000 @@ -1940,7 +1940,7 @@
      -

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      +

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html 2024-03-17 21:57:38.507203812 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html 2024-03-17 21:57:38.511203837 +0000 @@ -999,7 +999,7 @@
      -

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix. The vector types can be block vectors or non-block vectors (only if the matrix has only one row or column, respectively), and need to define TrilinosWrappers::SparseMatrix::vmult.

      +

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix. The vector types can be block vectors or non-block vectors (only if the matrix has only one row or column, respectively), and need to define TrilinosWrappers::SparseMatrix::vmult.

      Definition at line 444 of file trilinos_block_sparse_matrix.h.

      @@ -1029,7 +1029,7 @@
      -

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      +

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      Definition at line 457 of file trilinos_block_sparse_matrix.h.

      @@ -1935,7 +1935,7 @@
      -

      Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

      +

      Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

      @@ -2040,7 +2040,7 @@
      -

      Compute the matrix scalar product $\left(u,Mv\right)$.

      +

      Compute the matrix scalar product $\left(u,Mv\right)$.

      @@ -2437,7 +2437,7 @@
      -

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      +

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

      @@ -2545,7 +2545,7 @@
      -

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      +

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html 2024-03-17 21:57:38.579204257 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html 2024-03-17 21:57:38.587204306 +0000 @@ -1690,7 +1690,7 @@
      -

      Return the square of the $l_2$-norm.

      +

      Return the square of the $l_2$-norm.

      @@ -1742,7 +1742,7 @@
      -

      Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

      +

      Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

      @@ -1768,7 +1768,7 @@
      -

      Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

      +

      Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

      @@ -1794,7 +1794,7 @@
      -

      Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

      +

      Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 2024-03-17 21:57:38.647204677 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 2024-03-17 21:57:38.655204726 +0000 @@ -1207,8 +1207,8 @@
      -

      Return a pair of indices indicating which elements of this vector are stored locally. The first number is the index of the first element stored, the second the index of the one past the last one that is stored locally. If this is a sequential vector, then the result will be the pair (0,N), otherwise it will be a pair (i,i+n), where n=local_size() and i is the first element of the vector stored on this processor, corresponding to the half open interval $[i,i+n)$

      -
      Note
      The description above is true most of the time, but not always. In particular, Trilinos vectors need not store contiguous ranges of elements such as $[i,i+n)$. Rather, it can store vectors where the elements are distributed in an arbitrary way across all processors and each processor simply stores a particular subset, not necessarily contiguous. In this case, this function clearly makes no sense since it could, at best, return a range that includes all elements that are stored locally. Thus, the function only succeeds if the locally stored range is indeed contiguous. It will trigger an assertion if the local portion of the vector is not contiguous.
      +

      Return a pair of indices indicating which elements of this vector are stored locally. The first number is the index of the first element stored, the second the index of the one past the last one that is stored locally. If this is a sequential vector, then the result will be the pair (0,N), otherwise it will be a pair (i,i+n), where n=local_size() and i is the first element of the vector stored on this processor, corresponding to the half open interval $[i,i+n)$

      +
      Note
      The description above is true most of the time, but not always. In particular, Trilinos vectors need not store contiguous ranges of elements such as $[i,i+n)$. Rather, it can store vectors where the elements are distributed in an arbitrary way across all processors and each processor simply stores a particular subset, not necessarily contiguous. In this case, this function clearly makes no sense since it could, at best, return a range that includes all elements that are stored locally. Thus, the function only succeeds if the locally stored range is indeed contiguous. It will trigger an assertion if the local portion of the vector is not contiguous.
      @@ -1319,7 +1319,7 @@
      -

      Return the square of the $l_2$-norm.

      +

      Return the square of the $l_2$-norm.

      @@ -1391,7 +1391,7 @@
      -

      $l_1$-norm of the vector. The sum of the absolute values.

      +

      $l_1$-norm of the vector. The sum of the absolute values.

      @@ -1409,7 +1409,7 @@
      -

      $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

      +

      $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

      @@ -1427,7 +1427,7 @@
      -

      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      +

      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 2024-03-17 21:57:38.691204949 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 2024-03-17 21:57:38.699204998 +0000 @@ -368,7 +368,7 @@
      -

      A user function that applies the Jacobian $\nabla_u F(u)$ to x and writes the result in y. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      +

      A user function that applies the Jacobian $\nabla_u F(u)$ to x and writes the result in y. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      Note
      This function is optional and is used in the case of certain configurations. For instance, this function is required if the polynomial line search (NOX::LineSearch::Polynomial) is chosen, whereas for the full step case (NOX::LineSearch::FullStep) it won't be called.
      This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. NOX can not deal with "recoverable" errors, so if a callback throws an exception of type RecoverableUserCallbackError, then this exception is treated like any other exception.
      @@ -390,7 +390,7 @@
      -

      A user function that applies the inverse of the Jacobian $[\nabla_u F(u)]^{-1}$ to y and writes the result in x. The parameter tolerance specifies the error reduction if an iterative solver is used in applying the inverse matrix. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      +

      A user function that applies the inverse of the Jacobian $[\nabla_u F(u)]^{-1}$ to y and writes the result in x. The parameter tolerance specifies the error reduction if an iterative solver is used in applying the inverse matrix. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      Note
      This function is optional and is used in the case of certain configurations.
      This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. NOX can not deal with "recoverable" errors, so if a callback throws an exception of type RecoverableUserCallbackError, then this exception is treated like any other exception.
      @@ -412,7 +412,7 @@
      -

      A user function that applies the inverse of the Jacobian $[\nabla_u F(u)]^{-1}$ to y, writes the result in x and returns the number of linear iterations the linear solver needed. The parameter tolerance species the error reduction if an iterative solver is used. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      +

      A user function that applies the inverse of the Jacobian $[\nabla_u F(u)]^{-1}$ to y, writes the result in x and returns the number of linear iterations the linear solver needed. The parameter tolerance species the error reduction if an iterative solver is used. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      Note
      This function is used if solve_with_jacobian is not provided. Its return value is compared again AdditionalFlags::threshold_n_linear_iterations; if it is larger, the preconditioner will be built before the next linear system is solved. The use of this approach is predicated on the idea that one can keep using a preconditioner built earlier as long as it is a good preconditioner for the matrix currently in use – where "good" is defined as leading to a number of iterations to solve linear systems less than the threshold given by the current variable.
      This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. NOX can not deal with "recoverable" errors, so if a callback throws an exception of type RecoverableUserCallbackError, then this exception is treated like any other exception.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 2024-03-17 21:57:38.771205443 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 2024-03-17 21:57:38.775205467 +0000 @@ -2097,7 +2097,7 @@
      -

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      +

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation.

      The implementation of this function is not as efficient as the one in the SparseMatrix class used in deal.II (i.e. the original one, not the Trilinos wrapper class) since Trilinos doesn't support this operation and needs a temporary vector.

      The vector has to be initialized with the same IndexSet the matrix was initialized with.

      @@ -2122,7 +2122,7 @@ const MPI::Vector & v&#href_anchor"memdoc"> -

      Compute the matrix scalar product $\left(u,Mv\right)$.

      +

      Compute the matrix scalar product $\left(u,Mv\right)$.

      The implementation of this function is not as efficient as the one in the SparseMatrix class used in deal.II (i.e. the original one, not the Trilinos wrapper class) since Trilinos doesn't support this operation and needs a temporary vector.

      The vector u has to be initialized with the same IndexSet that was used for the row indices of the matrix and the vector v has to be initialized with the same IndexSet that was used for the column indices of the matrix.

      In case of a localized Vector, this function will only work when running on one processor, since the matrix object is inherently distributed. Otherwise, an exception will be thrown.

      @@ -2231,10 +2231,10 @@
      -

      Return the l1-norm of the matrix, that is $|M|_1=
+<p>Return the <em>l</em><sub>1</sub>-norm of the matrix, that is <picture><source srcset=$|M|_1=
 \max_{\mathrm{all\ columns\ } j} \sum_{\mathrm{all\ rows\ } i}
-|M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1 \leq |M|_1
-|v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      +|M_{ij}|$" src="form_1959.png"/>, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1 \leq |M|_1
+|v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 1911 of file trilinos_sparse_matrix.cc.

      @@ -2254,8 +2254,8 @@
      -

      Return the linfty-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ } i}\sum_{\mathrm{all\ columns\ }
-j} |M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_\infty \leq
+<p>Return the linfty-norm of the matrix, that is  <picture><source srcset=$|M|_\infty=\max_{\mathrm{all\ rows\ } i}\sum_{\mathrm{all\ columns\ }
+j} |M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 1920 of file trilinos_sparse_matrix.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html 2024-03-17 21:57:38.843205887 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html 2024-03-17 21:57:38.839205863 +0000 @@ -454,7 +454,7 @@
      -

      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      +

      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      It is possible to specify the number of columns entries per row using the optional n_entries_per_row argument. However, this value does not need to be accurate or even given at all, since one does usually not have this kind of information before building the sparsity pattern (the usual case when the function DoFTools::make_sparsity_pattern() is called). The entries are allocated dynamically in a similar manner as for the deal.II DynamicSparsityPattern classes. However, a good estimate will reduce the setup time of the sparsity pattern.

      Definition at line 100 of file trilinos_sparsity_pattern.cc.

      @@ -484,7 +484,7 @@
      -

      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      +

      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      The vector n_entries_per_row specifies the number of entries in each row (an information usually not available, though).

      Definition at line 109 of file trilinos_sparsity_pattern.cc.

      @@ -756,7 +756,7 @@
      -

      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      +

      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      The number of columns entries per row is specified as the maximum number of entries argument. This does not need to be an accurate number since the entries are allocated dynamically in a similar manner as for the deal.II DynamicSparsityPattern classes, but a good estimate will reduce the setup time of the sparsity pattern.

      Definition at line 214 of file trilinos_sparsity_pattern.cc.

      @@ -786,7 +786,7 @@
      -

      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      +

      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      The vector n_entries_per_row specifies the number of entries in each row.

      Definition at line 227 of file trilinos_sparsity_pattern.cc.

      @@ -1287,7 +1287,7 @@
      -

      Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

      +

      Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

      Definition at line 896 of file trilinos_sparsity_pattern.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 2024-03-17 21:57:38.899206234 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 2024-03-17 21:57:38.903206258 +0000 @@ -316,7 +316,7 @@ const MPI_Comm communicator&#href_anchor"memdoc">

      Constructor that takes the number of locally-owned degrees of freedom local_size and the number of ghost degrees of freedom ghost_size.

      -

      The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively.

      +

      The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively.

      Note
      Setting the ghost_size variable to an appropriate value provides memory space for the ghost data in a vector's memory allocation as and allows access to it via local_element(). However, the associated global indices must be handled externally in this case.

      Definition at line 65 of file partitioner.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 2024-03-17 21:57:38.939206480 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 2024-03-17 21:57:38.943206505 +0000 @@ -225,7 +225,7 @@ const unsigned int column_block_size&#href_anchor"memdoc">

      Constructor for a process grid for a given mpi_communicator. In this case the process grid is heuristically chosen based on the dimensions and block-cyclic distribution of a target matrix provided in n_rows_matrix, n_columns_matrix, row_block_size and column_block_size.

      -

      The maximum number of MPI cores one can utilize is $\min\{\frac{M}{MB}\frac{N}{NB}, Np\}$, where $M,N$ are the matrix dimension and $MB,NB$ are the block sizes and $Np$ is the number of processes in the mpi_communicator. This function then creates a 2d processor grid assuming the ratio between number of process row $p$ and columns $q$ to be equal the ratio between matrix dimensions $M$ and $N$.

      +

      The maximum number of MPI cores one can utilize is $\min\{\frac{M}{MB}\frac{N}{NB}, Np\}$, where $M,N$ are the matrix dimension and $MB,NB$ are the block sizes and $Np$ is the number of processes in the mpi_communicator. This function then creates a 2d processor grid assuming the ratio between number of process row $p$ and columns $q$ to be equal the ratio between matrix dimensions $M$ and $N$.

      For example, a square matrix $640x640$ with the block size $32$ and the mpi_communicator with 11 cores will result in the $3x3$ process grid.

      Definition at line 209 of file process_grid.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 2024-03-17 21:57:39.003206875 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 2024-03-17 21:57:39.011206925 +0000 @@ -1273,7 +1273,7 @@
      -

      Return the square of the $l_2$-norm.

      +

      Return the square of the $l_2$-norm.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      @@ -1315,7 +1315,7 @@
      -

      $l_1$-norm of the vector. The sum of the absolute values.

      +

      $l_1$-norm of the vector. The sum of the absolute values.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      @@ -1336,7 +1336,7 @@
      -

      $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

      +

      $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      @@ -1357,7 +1357,7 @@
      -

      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      +

      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html 2024-03-17 21:57:39.075207320 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html 2024-03-17 21:57:39.079207346 +0000 @@ -1324,7 +1324,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      This function is the equivalent of FiniteElement::component_mask() with the same arguments. It verifies that it gets the same result from every one of the elements that are stored in this FECollection. If this is not the case, it throws an exception.
      Parameters
      @@ -1420,7 +1420,7 @@

      Given a component mask (see this glossary entry ), produce a block mask (see this glossary entry ) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      This function is the equivalent of FiniteElement::component_mask() with the same arguments. It verifies that it gets the same result from every one of the elements that are stored in this FECollection. If this is not the case, it throws an exception.
      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 2024-03-17 21:57:39.107207519 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 2024-03-17 21:57:39.115207567 +0000 @@ -166,7 +166,7 @@
      Parameters
      - +
      real_support_pointsThe position of the mapping support points in real space, queried by MappingQ::compute_mapping_support_points().
      unit_support_pointsThe location of the support points in reference coordinates $[0, 1]^d$ that map to the mapping support points in real space by a polynomial map.
      unit_support_pointsThe location of the support points in reference coordinates $[0, 1]^d$ that map to the mapping support points in real space by a polynomial map.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 2024-03-17 21:57:39.275208556 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 2024-03-17 21:57:39.279208580 +0000 @@ -2026,7 +2026,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

      -

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      +

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      Note
      It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
      This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
      @@ -2426,7 +2426,7 @@
      -

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      +

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 2024-03-17 21:57:39.435209544 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 2024-03-17 21:57:39.435209544 +0000 @@ -1586,7 +1586,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

      -

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      +

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      Note
      It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
      This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
      @@ -2053,7 +2053,7 @@
      -

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      +

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 2024-03-17 21:57:39.619210680 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 2024-03-17 21:57:39.619210680 +0000 @@ -1945,7 +1945,7 @@
      -

      Return a permutation vector for the order the coarse cells are handed off to p4est. For example the value of the $i$th element in this vector is the index of the deal.II coarse cell (counting from begin(0)) that corresponds to the $i$th tree managed by p4est.

      +

      Return a permutation vector for the order the coarse cells are handed off to p4est. For example the value of the $i$th element in this vector is the index of the deal.II coarse cell (counting from begin(0)) that corresponds to the $i$th tree managed by p4est.

      Definition at line 3609 of file tria.cc.

      @@ -3039,7 +3039,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

      -

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      +

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      Note
      It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
      This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
      @@ -3355,7 +3355,7 @@
      -

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      +

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 2024-03-17 21:57:39.787211718 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 2024-03-17 21:57:39.787211718 +0000 @@ -2361,7 +2361,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

      -

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      +

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      Note
      It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
      This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
      @@ -2760,7 +2760,7 @@
      -

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      +

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 2024-03-17 21:57:39.951212731 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 2024-03-17 21:57:39.955212755 +0000 @@ -2529,7 +2529,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

      -

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      +

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      Note
      It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
      This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
      @@ -2857,7 +2857,7 @@
      -

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      +

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 2024-03-17 21:57:40.115213744 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 2024-03-17 21:57:40.123213793 +0000 @@ -2029,7 +2029,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

      -

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      +

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      Note
      It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
      This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
      @@ -2376,7 +2376,7 @@
      -

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      +

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
      /usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 2024-03-17 21:57:40.167214065 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 2024-03-17 21:57:40.171214090 +0000 @@ -105,34 +105,34 @@
      Use numbers::invalid_fe_index instead.
      Member DoFHandler< dim, spacedim >::set_active_fe_indices (const std::vector< unsigned int > &active_fe_indices)
      Use set_active_fe_indices() with the types::fe_index datatype.
      -
      Member DoFTools::extract_boundary_dofs (const DoFHandler< dim, spacedim > &dof_handler, const ComponentMask &component_mask, IndexSet &selected_dofs, const std::set< types::boundary_id > &boundary_ids={})
      -
      Use the previous function instead.
      Member DoFTools::extract_boundary_dofs (const DoFHandler< dim, spacedim > &dof_handler, const ComponentMask &component_mask, std::vector< bool > &selected_dofs, const std::set< types::boundary_id > &boundary_ids={})
      -
      This function will not work for DoFHandler objects that are built on a parallel::distributed::Triangulation object. The reasons is that the output argument selected_dofs has to have a length equal to all global degrees of freedom. Consequently, this does not scale to very large problems, and this is also why the function is deprecated. If you need the functionality of this function for parallel triangulations, then you need to use the other DoFTools::extract_boundary_dofs() function that returns its information via an IndexSet object.
      +
      This function will not work for DoFHandler objects that are built on a parallel::distributed::Triangulation object. The reasons is that the output argument selected_dofs has to have a length equal to all global degrees of freedom. Consequently, this does not scale to very large problems, and this is also why the function is deprecated. If you need the functionality of this function for parallel triangulations, then you need to use the other DoFTools::extract_boundary_dofs() function that returns its information via an IndexSet object.
      +
      Member DoFTools::extract_boundary_dofs (const DoFHandler< dim, spacedim > &dof_handler, const ComponentMask &component_mask, IndexSet &selected_dofs, const std::set< types::boundary_id > &boundary_ids={})
      +
      Use the previous function instead.
      Member DoFTools::extract_locally_active_dofs (const DoFHandler< dim, spacedim > &dof_handler, IndexSet &dof_set)
      -
      Use the previous function instead.
      +
      Use the previous function instead.
      Member DoFTools::extract_locally_active_level_dofs (const DoFHandler< dim, spacedim > &dof_handler, IndexSet &dof_set, const unsigned int level)
      -
      Use the previous function instead.
      +
      Use the previous function instead.
      Member DoFTools::extract_locally_relevant_dofs (const DoFHandler< dim, spacedim > &dof_handler, IndexSet &dof_set)
      -
      Use the previous function instead.
      +
      Use the previous function instead.
      Member DoFTools::extract_locally_relevant_level_dofs (const DoFHandler< dim, spacedim > &dof_handler, const unsigned int level, IndexSet &dof_set)
      -
      Use the previous function instead.
      +
      Use the previous function instead.
      Member DoFTools::get_active_fe_indices (const DoFHandler< dim, spacedim > &dof_handler, std::vector< unsigned int > &active_fe_indices)
      -
      Use DoFHandler::get_active_fe_indices() that returns the result vector.
      -
      Member DoFTools::map_dofs_to_support_points (const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof_handler, std::map< types::global_dof_index, Point< spacedim > > &support_points, const ComponentMask &mask=ComponentMask())
      -
      Use the function that returns the std::map instead.
      +
      Use DoFHandler::get_active_fe_indices() that returns the result vector.
      Member DoFTools::map_dofs_to_support_points (const hp::MappingCollection< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof_handler, std::map< types::global_dof_index, Point< spacedim > > &support_points, const ComponentMask &mask=ComponentMask())
      -
      Use the function that returns the std::map instead.
      -
      Member FEEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::evaluate (const VectorizedArrayType *values_array, const bool evaluate_values, const bool evaluate_gradients, const bool evaluate_hessians=false)
      -
      use evaluate() with the EvaluationFlags argument.
      +
      Use the function that returns the std::map instead.
      +
      Member DoFTools::map_dofs_to_support_points (const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof_handler, std::map< types::global_dof_index, Point< spacedim > > &support_points, const ComponentMask &mask=ComponentMask())
      +
      Use the function that returns the std::map instead.
      Member FEEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::evaluate (const bool evaluate_values, const bool evaluate_gradients, const bool evaluate_hessians=false)
      use evaluate() with the EvaluationFlags argument.
      +
      Member FEEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::evaluate (const VectorizedArrayType *values_array, const bool evaluate_values, const bool evaluate_gradients, const bool evaluate_hessians=false)
      +
      use evaluate() with the EvaluationFlags argument.
      Member FEEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::gather_evaluate (const VectorType &input_vector, const bool evaluate_values, const bool evaluate_gradients, const bool evaluate_hessians=false)
      Please use the gather_evaluate() function with the EvaluationFlags argument.
      -
      Member FEEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::integrate (const bool integrate_values, const bool integrate_gradients, VectorizedArrayType *values_array)
      -
      Please use the integrate() function with the EvaluationFlags argument.
      Member FEEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::integrate (const bool integrate_values, const bool integrate_gradients)
      Please use the integrate() function with the EvaluationFlags argument.
      +
      Member FEEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::integrate (const bool integrate_values, const bool integrate_gradients, VectorizedArrayType *values_array)
      +
      Please use the integrate() function with the EvaluationFlags argument.
      Member FEEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::integrate_scatter (const bool integrate_values, const bool integrate_gradients, VectorType &output_vector)
      Please use the integrate_scatter() function with the EvaluationFlags argument.
      Member FEFaceEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::evaluate (const bool evaluate_values, const bool evaluate_gradients)
      @@ -192,17 +192,17 @@
      Member FEInterfaceViews::Vector< dim, spacedim >::jump_hessian (const unsigned int interface_dof_index, const unsigned int q_point) const
      Use the average_of_hessians() function instead.
      Struct FEValuesViews::Scalar< dim, spacedim >::OutputType< Number >
      -
      Use the types defined in the surrounding class instead.
      +
      Use the types defined in the surrounding class instead.
      Struct FEValuesViews::SymmetricTensor< 2, dim, spacedim >::OutputType< Number >
      -
      Use the types defined in the surrounding class instead.
      +
      Use the types defined in the surrounding class instead.
      Struct FEValuesViews::Tensor< 2, dim, spacedim >::OutputType< Number >
      -
      Use the types defined in the surrounding class instead.
      -
      Struct FEValuesViews::Vector< dim, spacedim >::OutputType< Number >
      Use the types defined in the surrounding class instead.
      +
      Struct FEValuesViews::Vector< dim, spacedim >::OutputType< Number >
      +
      Use the types defined in the surrounding class instead.
      Member FiniteElement< dim, spacedim >::fill_fe_face_values (const typename Triangulation< dim, spacedim >::cell_iterator &cell, const unsigned int face_no, const Quadrature< dim - 1 > &quadrature, const Mapping< dim, spacedim > &mapping, const typename Mapping< dim, spacedim >::InternalDataBase &mapping_internal, const internal::FEValuesImplementation::MappingRelatedData< dim, spacedim > &mapping_data, const InternalDataBase &fe_internal, internal::FEValuesImplementation::FiniteElementRelatedData< dim, spacedim > &output_data) const
      -
      Use the version taking a hp::QCollection argument.
      +
      Use the version taking a hp::QCollection argument.
      Member FiniteElement< dim, spacedim >::get_face_data (const UpdateFlags update_flags, const Mapping< dim, spacedim > &mapping, const Quadrature< dim - 1 > &quadrature, internal::FEValuesImplementation::FiniteElementRelatedData< dim, spacedim > &output_data) const
      -
      Use the version taking a hp::QCollection argument.
      +
      Use the version taking a hp::QCollection argument.
      Class GridReordering< dim, spacedim >
      Use GridTools::invert_all_negative_measure_cells() or GridTools::consistently_order_cells() instead of the functions provided by this class. Usage of the old-style numbering is deprecated.
      Member GridReordering< dim, spacedim >::invert_all_cells_of_negative_grid (const std::vector< Point< spacedim > > &all_vertices, std::vector< CellData< dim > > &original_cells, const bool use_new_style_ordering=false)
      @@ -210,11 +210,11 @@
      Member GridReordering< dim, spacedim >::reorder_cells (std::vector< CellData< dim > > &original_cells, const bool use_new_style_ordering=false)
      Use GridTools::consistently_order_cells() instead.
      Member GridTools::cell_measure (const std::vector< Point< dim > > &all_vertices, const unsigned int(&vertex_indices)[GeometryInfo< dim >::vertices_per_cell])
      -
      Use the more general function which takes an ArrayView instead.
      +
      Use the more general function which takes an ArrayView instead.
      Struct GridTools::CellDataTransferBuffer< dim, T >
      -
      The implementation in deal.II has been rewritten, making this class obsolete for use within deal.II. Use your own data structures instead.
      +
      The implementation in deal.II has been rewritten, making this class obsolete for use within deal.II. Use your own data structures instead.
      Member GridTools::rotate (const double angle, const unsigned int axis, Triangulation< dim, 3 > &triangulation)
      -
      Use the alternative with the unit vector instead.
      +
      Use the alternative with the unit vector instead.
      Member identity
      Use std_cxx20::identity_type instead.
      Member LinearAlgebra::CUDAWrappers::Vector< Number >::import (const ReadWriteVector< Number > &V, VectorOperation::values operation, std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > communication_pattern={}) override
      @@ -223,20 +223,20 @@
      Use import_elements() instead.
      Member LinearAlgebra::distributed::BlockVector< Number >::zero_out_ghosts () const
      Use zero_out_ghost_values() instead.
      -
      Member LinearAlgebra::distributed::Vector< Number, MemorySpace >::import (const Vector< Number, MemorySpace2 > &src, VectorOperation::values operation)
      -
      Use import_elements() instead.
      Member LinearAlgebra::distributed::Vector< Number, MemorySpace >::import (const LinearAlgebra::ReadWriteVector< Number > &V, VectorOperation::values operation, std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > communication_pattern={}) override
      Use import_elements() instead.
      +
      Member LinearAlgebra::distributed::Vector< Number, MemorySpace >::import (const Vector< Number, MemorySpace2 > &src, VectorOperation::values operation)
      +
      Use import_elements() instead.
      Member LinearAlgebra::distributed::Vector< Number, MemorySpace >::local_size () const
      Use locally_owned_size() instead.
      Member LinearAlgebra::distributed::Vector< Number, MemorySpace >::zero_out_ghosts () const
      Use zero_out_ghost_values() instead.
      Member LinearAlgebra::EpetraWrappers::Vector::import (const ReadWriteVector< double > &V, VectorOperation::values operation, std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > communication_pattern={}) override
      Use import_elements() instead.
      -
      Member LinearAlgebra::ReadWriteVector< Number >::import (const ::Vector< Number > &V, VectorOperation::values operation, const std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > &communication_pattern={})
      -
      Use import_elements() instead.
      Member LinearAlgebra::ReadWriteVector< Number >::import (const LinearAlgebra::Vector< Number > &V, VectorOperation::values operation, const std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > &communication_pattern={})
      Use import_elements() instead.
      +
      Member LinearAlgebra::ReadWriteVector< Number >::import (const ::Vector< Number > &V, VectorOperation::values operation, const std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > &communication_pattern={})
      +
      Use import_elements() instead.
      Member LinearAlgebra::ReadWriteVector< Number >::import (const distributed::Vector< Number, MemorySpace > &V, VectorOperation::values operation, const std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > &communication_pattern={})
      Use import_elements() instead.
      Member LinearAlgebra::ReadWriteVector< Number >::import (const PETScWrappers::MPI::Vector &V, VectorOperation::values operation, const std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > &communication_pattern={})
      @@ -257,18 +257,18 @@
      Use import_elements() instead.
      Member LinearAlgebra::VectorSpaceVector< Number >::import (const ReadWriteVector< Number > &V, VectorOperation::values operation, std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > communication_pattern={})=0
      Use import_elements() instead.
      +
      Member make_array_view (Tensor< rank, dim, Number > &tensor)
      +
      This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      Member make_array_view (SymmetricTensor< rank, dim, Number > &tensor)
      This function suggests that the elements of a SymmetricTensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      Member make_array_view (const SymmetricTensor< rank, dim, Number > &tensor)
      This function suggests that the elements of a SymmetricTensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      Member make_array_view (const Tensor< rank, dim, Number > &tensor)
      This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      -
      Member make_array_view (Tensor< rank, dim, Number > &tensor)
      -
      This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      Member Mapping< dim, spacedim >::fill_fe_face_values (const typename Triangulation< dim, spacedim >::cell_iterator &cell, const unsigned int face_no, const Quadrature< dim - 1 > &quadrature, const typename Mapping< dim, spacedim >::InternalDataBase &internal_data, internal::FEValuesImplementation::MappingRelatedData< dim, spacedim > &output_data) const
      -
      Use the version taking a hp::QCollection argument.
      +
      Use the version taking a hp::QCollection argument.
      Member Mapping< dim, spacedim >::get_face_data (const UpdateFlags update_flags, const Quadrature< dim - 1 > &quadrature) const
      -
      Use the version taking a hp::QCollection argument.
      +
      Use the version taking a hp::QCollection argument.
      Member MappingQCache< dim, spacedim >::initialize (const Triangulation< dim, spacedim > &triangulation, const MappingQ< dim, spacedim > &mapping)
      Use initialize() version above instead.
      Member parallel::distributed::Triangulation< dim, spacedim >::load (const std::string &filename, const bool autopartition) override
      @@ -328,21 +328,21 @@
      Member Physics::Transformations::Rotations::rotation_matrix_3d (const Point< 3, Number > &axis, const Number &angle)
      Use the variant with a Tensor as an axis.
      Member ReferenceCell::compute_orientation (const std::array< T, N > &vertices_0, const std::array< T, N > &vertices_1) const
      -
      Use get_combined_orientation() instead.
      +
      Use get_combined_orientation() instead.
      Member ReferenceCell::permute_according_orientation (const std::array< T, N > &vertices, const unsigned int orientation) const
      -
      Use permute_by_combined_orientation() instead.
      +
      Use permute_by_combined_orientation() instead.
      Class SLEPcWrappers::TransformationSpectrumFolding
      Since deal.II requires PETSc 3.7 or newer this class no longer does anything.
      Member SparsityTools::distribute_sparsity_pattern (BlockDynamicSparsityPattern &dsp, const std::vector< IndexSet > &owned_set_per_cpu, const MPI_Comm mpi_comm, const IndexSet &myrange)
      Use the distribute_sparsity_pattern() with a single index set for the present MPI process only.
      Member SUNDIALS::IDA< VectorType >::solve_jacobian_system )
      -
      Use solve_with_jacobian() instead which also uses a numerical tolerance.
      +
      Use solve_with_jacobian() instead which also uses a numerical tolerance.
      Member SUNDIALS::KINSOL< VectorType >::solve_jacobian_system )
      -
      Versions of SUNDIALS after 4.0 no longer provide all of the information necessary for this callback (see below). Use the solve_with_jacobian callback described below.
      -
      Member SymmetricTensor< rank_, dim, Number >::begin_raw () const
      -
      This function suggests that the elements of a SymmetricTensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      +
      Versions of SUNDIALS after 4.0 no longer provide all of the information necessary for this callback (see below). Use the solve_with_jacobian callback described below.
      Member SymmetricTensor< rank_, dim, Number >::begin_raw ()
      This function suggests that the elements of a SymmetricTensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      +
      Member SymmetricTensor< rank_, dim, Number >::begin_raw () const
      +
      This function suggests that the elements of a SymmetricTensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      Member SymmetricTensor< rank_, dim, Number >::end_raw () const
      This function suggests that the elements of a SymmetricTensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      Member SymmetricTensor< rank_, dim, Number >::end_raw ()
      @@ -351,22 +351,22 @@
      This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      Member Tensor< 0, dim, Number >::begin_raw ()
      This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      -
      Member Tensor< 0, dim, Number >::end_raw () const
      -
      This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      Member Tensor< 0, dim, Number >::end_raw ()
      This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      +
      Member Tensor< 0, dim, Number >::end_raw () const
      +
      This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      Member Tensor< rank_, dim, Number >::unroll (Vector< OtherNumber > &result) const
      Use the more general function that takes a pair of iterators instead.
      Member Threads::new_thread (RT(C::*fun_ptr)(Args...) const, std_cxx20::type_identity_t< const C > &c, std_cxx20::type_identity_t< Args >... args)
      Use std::thread or std::jthread instead.
      Member Threads::new_thread (RT(C::*fun_ptr)(Args...), std_cxx20::type_identity_t< C > &c, std_cxx20::type_identity_t< Args >... args)
      Use std::thread or std::jthread instead.
      -
      Member Threads::new_thread (RT(*fun_ptr)(Args...), std_cxx20::type_identity_t< Args >... args)
      -
      Use std::thread or std::jthread instead.
      -
      Member Threads::new_thread (FunctionObjectType function_object) -> Thread< decltype(function_object())>
      -
      Use std::thread or std::jthread instead.
      Member Threads::new_thread (const std::function< RT()> &function)
      Use std::thread or std::jthread instead.
      +
      Member Threads::new_thread (FunctionObjectType function_object) -> Thread< decltype(function_object())>
      +
      Use std::thread or std::jthread instead.
      +
      Member Threads::new_thread (RT(*fun_ptr)(Args...), std_cxx20::type_identity_t< Args >... args)
      +
      Use std::thread or std::jthread instead.
      Class Threads::Thread< RT >
      Use std::thread or std::jthread instead.
      Class Threads::ThreadGroup< RT >
      @@ -376,9 +376,9 @@
      Member TriaAccessor< 0, dim, spacedim >::number_of_children ()
      Use n_active_descendants() instead.
      Member TriaAccessor< structdim, dim, spacedim >::number_of_children () const
      /usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 2024-03-17 21:57:40.207214313 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 2024-03-17 21:57:40.211214336 +0000 @@ -145,21 +145,21 @@
      -

      One of the uses of DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$

      -\[
+<p>One of the uses of <a class=DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$

      +\[
   \nabla \mathbf F(\mathbf x) \; \Delta \mathbf x
   \approx
   \mathbf F(\mathbf x + \Delta \mathbf x) - \mathbf F(\mathbf x).
-\] +\]" src="form_396.png"/>

      The transformation corresponds to

      -\[
+<picture><source srcset=\[
   [\text{result}]_{i_1,\dots,i_k} = i\sum_{j}
   \left[\nabla \mathbf F(\mathbf x)\right]_{i_1,\dots,i_k, j}
   \Delta x_j
-\] +\]" src="form_397.png"/>

      -

      in index notation and corresponds to $[\Delta \mathbf x] [\nabla \mathbf F(\mathbf x)]^T$ in matrix notation.

      +

      in index notation and corresponds to $[\Delta \mathbf x] [\nabla \mathbf F(\mathbf x)]^T$ in matrix notation.

      Definition at line 454 of file derivative_form.h.

      @@ -189,7 +189,7 @@
      -

      Similar to the previous apply_transformation(). Each row of the result corresponds to one of the rows of D_X transformed by grad_F, equivalent to $\mathrm{D\_X} \, \mathrm{grad\_F}^T$ in matrix notation.

      +

      Similar to the previous apply_transformation(). Each row of the result corresponds to one of the rows of D_X transformed by grad_F, equivalent to $\mathrm{D\_X} \, \mathrm{grad\_F}^T$ in matrix notation.

      Definition at line 479 of file derivative_form.h.

      @@ -219,7 +219,7 @@
      -

      Similar to the previous apply_transformation(), specialized for the case dim == spacedim where we can return a rank-2 tensor instead of the more general DerivativeForm. Each row of the result corresponds to one of the rows of D_X transformed by grad_F, equivalent to $\mathrm{D\_X} \, \mathrm{grad\_F}^T$ in matrix notation.

      +

      Similar to the previous apply_transformation(), specialized for the case dim == spacedim where we can return a rank-2 tensor instead of the more general DerivativeForm. Each row of the result corresponds to one of the rows of D_X transformed by grad_F, equivalent to $\mathrm{D\_X} \, \mathrm{grad\_F}^T$ in matrix notation.

      Definition at line 505 of file derivative_form.h.

      @@ -279,11 +279,11 @@
      -

      Similar to the previous apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

      -\[
+<p>Similar to the previous <a class=apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

      +\[
   \mathbf u \cdot \mathbf A \mathbf v =
   \text{DF2}^{-1}(\mathbf u) \cdot \text{DF1}^{-1}(\mathbf v)
-\] +\]" src="form_404.png"/>

      Definition at line 565 of file derivative_form.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html differs (HTML document, ASCII text) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html 2024-03-17 21:57:40.259214633 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html 2024-03-17 21:57:40.263214658 +0000 @@ -446,8 +446,8 @@ - + @@ -613,8 +613,8 @@ - + @@ -734,8 +734,8 @@ - + @@ -899,8 +899,8 @@ - + @@ -966,8 +966,8 @@ - + @@ -1168,16 +1168,16 @@ + - + - @@ -1228,8 +1228,8 @@ - + @@ -1334,8 +1334,8 @@ - + @@ -1425,8 +1425,8 @@ - + @@ -1478,8 +1478,8 @@ - + @@ -1492,8 +1492,8 @@ - + @@ -1518,8 +1518,8 @@ - + @@ -1584,8 +1584,8 @@ - + @@ -1666,8 +1666,8 @@ - + @@ -1679,8 +1679,8 @@ - + @@ -1768,8 +1768,8 @@ - + @@ -1835,8 +1835,8 @@ - + @@ -1864,8 +1864,8 @@ - + @@ -1975,8 +1975,8 @@ - + /usr/share/doc/packages/dealii/doxygen/deal.II/grid__tools_8h_source.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/grid__tools_8h_source.html 2024-03-17 21:57:40.403215523 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/grid__tools_8h_source.html 2024-03-17 21:57:40.411215572 +0000 @@ -1021,8 +1021,8 @@
      3213 const MeshType & mesh,
      -
      3214 const types::boundary_id b_id1,
      -
      3215 const types::boundary_id b_id2,
      +
      3214 const types::boundary_id b_id1,
      +
      3215 const types::boundary_id b_id2,
      3216 const unsigned int direction,
      3218 & matched_pairs,
      @@ -2596,6 +2596,7 @@
      STL namespace.
      ::VectorizedArray< Number, width > abs(const ::VectorizedArray< Number, width > &)
      Definition types.h:33
      +
      unsigned int boundary_id
      Definition types.h:141
      unsigned int subdomain_id
      Definition types.h:44
      unsigned int global_dof_index
      Definition types.h:82
      /usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 2024-03-17 21:57:40.459215868 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 2024-03-17 21:57:40.467215918 +0000 @@ -177,7 +177,7 @@

      The macro DEAL_II_CONSTEXPR expands to constexpr if the compiler supports enough constexpr features (such as loops). If the compiler does not then this macro expands to nothing.

      Functions declared as constexpr can be evaluated at compile time. Hence code like

      constexpr double det_A = determinant(A);
      DEAL_II_HOST constexpr Number determinant(const SymmetricTensor< 2, dim, Number > &)
      -

      assuming A is declared with the constexpr specifier, will typically result in compile-time constants. This example shows the performance gains of using constexpr because here we performed an operation with $O(\text{dim}^3)$ complexity during compile time, avoiding any runtime cost.

      +

      assuming A is declared with the constexpr specifier, will typically result in compile-time constants. This example shows the performance gains of using constexpr because here we performed an operation with $O(\text{dim}^3)$ complexity during compile time, avoiding any runtime cost.

      Function Documentation

      ◆ new_thread()

      /usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 2024-03-17 21:57:40.491216066 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 2024-03-17 21:57:40.495216091 +0000 @@ -176,7 +176,7 @@
      template <typename VectorType>
      virtual void Tstep(VectorType &u, const VectorType &v) const =0;
      };
      -

      where these two member functions perform one step (or the transpose of such a step) of the smoothing scheme. In other words, the operations performed by these functions are $u = u - P^{-1} (A u - v)$ and $u = u - P^{-T} (A u - v)$.

      +

      where these two member functions perform one step (or the transpose of such a step) of the smoothing scheme. In other words, the operations performed by these functions are $u = u - P^{-1} (A u - v)$ and $u = u - P^{-T} (A u - v)$.

    SparsityPatternType
    /usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 2024-03-17 21:57:40.851218290 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 2024-03-17 21:57:40.855218314 +0000 @@ -324,7 +324,7 @@
    std::function<void(Domain &, const Range &)> Tvmult;
    std::function<void(Domain &, const Range &)> Tvmult_add;

    Thus, such an object can be used as a matrix object in all iterative solver classes, either as a matrix object, or as preconditioner.

    -

    The big advantage of the LinearOperator class is that it provides syntactic sugar for complex matrix-vector operations. As an example consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possibly different) SparseMatrix objects. In order to construct a LinearOperator op that performs above computation when applied on a vector, one can write:

    #href_anchor"code" href="linear__operator__tools_8h.html">deal.II/lac/linear_operator_tools.h>
    +

    The big advantage of the LinearOperator class is that it provides syntactic sugar for complex matrix-vector operations. As an example consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possibly different) SparseMatrix objects. In order to construct a LinearOperator op that performs above computation when applied on a vector, one can write:

    #href_anchor"code" href="linear__operator__tools_8h.html">deal.II/lac/linear_operator_tools.h>
    double k;
    @@ -376,7 +376,7 @@
    result += b;
    result -= c;
    result += d;
    -

    that avoids any intermediate storage. As a second example (involving a LinearOperator object) consider the computation of a residual $b-Ax$:

    +

    that avoids any intermediate storage. As a second example (involving a LinearOperator object) consider the computation of a residual $b-Ax$:

    // ..
    @@ -1455,7 +1455,7 @@

    Return a LinearOperator that performs the operations associated with the Schur complement. There are two additional helper functions, condense_schur_rhs() and postprocess_schur_solution(), that are likely necessary to be used in order to perform any useful tasks in linear algebra with this operator.

    We construct the definition of the Schur complement in the following way:

    Consider a general system of linear equations that can be decomposed into two major sets of equations:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \mathbf{K}\mathbf{d} = \mathbf{f}
 \quad \Rightarrow\quad
 \left(\begin{array}{cc}
@@ -1468,60 +1468,60 @@
 \left(\begin{array}{cc}
    f \\ g
 \end{array}\right),
-\end{eqnarray*} +\end{eqnarray*}" src="form_1852.png"/>

    -

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

    +

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

    This is equivalent to the following two statements:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (1) \quad Ax + By &=& f \\
   (2) \quad Cx + Dy &=& g \quad .
-\end{eqnarray*} +\end{eqnarray*}" src="form_1857.png"/>

    -

    Assuming that $ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

    -\begin{eqnarray*}
+<p>Assuming that <picture><source srcset=$ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

    +\begin{eqnarray*}
   (3) \quad x &=& A^{-1}(f - By) \quad \text{from} \quad (1) \\
   (4) \quad y &=& D^{-1}(g - Cx) \quad \text{from} \quad (2) ,
-\end{eqnarray*} +\end{eqnarray*}" src="form_1859.png"/>

    which amount to performing block Gaussian elimination on this system of equations.

    For the purpose of the current implementation, we choose to substitute (3) into (2)

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   C \: A^{-1}(f - By) + Dy &=& g \\
   -C \: A^{-1} \: By + Dy &=& g - C \: A^{-1} \: f \quad .
-\end{eqnarray*} +\end{eqnarray*}" src="form_1860.png"/>

    This leads to the result

    -\[
+<picture><source srcset=\[
   (5) \quad (D - C\: A^{-1} \:B)y  = g - C \: A^{-1} f
       \quad \Rightarrow \quad Sy = g'
-\] +\]" src="form_1861.png"/>

    -

    with $ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

    -

    So for any arbitrary vector $ a $, the Schur complement performs the following operation:

    -\[
+<p> with <picture><source srcset=$ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

    +

    So for any arbitrary vector $ a $, the Schur complement performs the following operation:

    +\[
   (6) \quad Sa = (D - C \: A^{-1} \: B)a
-\] +\]" src="form_1868.png"/>

    A typical set of steps needed the solve a linear system (1),(2) would be:

    1. Define the inverse matrix A_inv (using inverse_operator()).
    2. -
    3. Define the Schur complement $ S $ (using schur_complement()).
    4. -
    5. Define iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that the both $ S $ and its preconditioner operate over the same space as $ D $.
    6. +
    7. Define the Schur complement $ S $ (using schur_complement()).
    8. +
    9. Define iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that the both $ S $ and its preconditioner operate over the same space as $ D $.
    10. Perform pre-processing step on the RHS of (5) using condense_schur_rhs():

      -\[
+<picture><source srcset=\[
      g' = g - C \: A^{-1} \: f
-   \] + \]" src="form_1870.png"/>

    11. -
    12. Solve for $ y $ in (5):

      -\[
+<li>Solve for <picture><source srcset=$ y $ in (5):

      +\[
      y =  S^{-1} g'
-   \] + \]" src="form_1872.png"/>

    13. Perform the post-processing step from (3) using postprocess_schur_solution():

      -\[
+<picture><source srcset=\[
      x =  A^{-1} (f - By)
-   \] + \]" src="form_1873.png"/>

    @@ -1567,10 +1567,10 @@
    LinearOperator< Domain, Range, Payload > inverse_operator(const LinearOperator< Range, Domain, Payload > &op, Solver &solver, const Preconditioner &preconditioner)
    PackagedOperation< Domain_1 > postprocess_schur_solution(const LinearOperator< Range_1, Domain_1, Payload > &A_inv, const LinearOperator< Range_1, Domain_2, Payload > &B, const Domain_2 &y, const Range_1 &f)
    -

    In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

    -

    A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1}
-$ is derived from the mass matrix over this space.

    -

    From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

    // Construction of approximate inverse of Schur complement
    +

    In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

    +

    A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1}
+$ is derived from the mass matrix over this space.

    +

    From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

    // Construction of approximate inverse of Schur complement
    const auto A_inv_approx = linear_operator(preconditioner_A);
    const auto S_approx = schur_complement(A_inv_approx,B,C,D);
    @@ -1593,8 +1593,8 @@
    // Solve for y
    y = S_inv * rhs;
    x = postprocess_schur_solution (A_inv,B,y,f);
    -

    Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of a IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $
-\text{prec}(D) $, should ideally be computationally inexpensive.

    +

    Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of a IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $
+\text{prec}(D) $, should ideally be computationally inexpensive.

    However, if an iterative solver based on IterationNumberControl is used as a preconditioner then the preconditioning operation is not a linear operation. Here a flexible solver like SolverFGMRES (flexible GMRES) is best employed as an outer solver in order to deal with the variable behavior of the preconditioner. Otherwise the iterative solver can stagnate somewhere near the tolerance of the preconditioner or generally behave erratically. Alternatively, using a ReductionControl would ensure that the preconditioner always solves to the same tolerance, thereby rendering its behavior constant.

    Further examples of this functionality can be found in the test-suite, such as tests/lac/schur_complement_01.cc . The solution of a multi- component problem (namely step-22) using the schur_complement can be found in tests/lac/schur_complement_03.cc .

    See also
    Block (linear algebra)
    @@ -1631,15 +1631,15 @@ const Range_2 & g&#href_anchor"memdoc">

    For the system of equations

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   Ax + By &=& f \\
   Cx + Dy &=& g \quad ,
-\end{eqnarray*} +\end{eqnarray*}" src="form_1879.png"/>

    this operation performs the pre-processing (condensation) step on the RHS subvector g so that the Schur complement can be used to solve this system of equations. More specifically, it produces an object that represents the condensed form of the subvector g, namely

    -\[
+<picture><source srcset=\[
   g' = g - C \: A^{-1} \: f
-\] +\]" src="form_1880.png"/>

    See also
    Block (linear algebra)
    @@ -1675,15 +1675,15 @@ const Range_1 & f&#href_anchor"memdoc">

    For the system of equations

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   Ax + By &=& f \\
   Cx + Dy &=& g \quad ,
-\end{eqnarray*} +\end{eqnarray*}" src="form_1879.png"/>

    this operation performs the post-processing step of the Schur complement to solve for the second subvector x once subvector y is known, with the result that

    -\[
+<picture><source srcset=\[
   x =  A^{-1}(f - By)
-\] +\]" src="form_1881.png"/>

    See also
    Block (linear algebra)
    @@ -3052,7 +3052,7 @@

    Return a LinearOperator that performs the operations associated with the Schur complement. There are two additional helper functions, condense_schur_rhs() and postprocess_schur_solution(), that are likely necessary to be used in order to perform any useful tasks in linear algebra with this operator.

    We construct the definition of the Schur complement in the following way:

    Consider a general system of linear equations that can be decomposed into two major sets of equations:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \mathbf{K}\mathbf{d} = \mathbf{f}
 \quad \Rightarrow\quad
 \left(\begin{array}{cc}
@@ -3065,60 +3065,60 @@
 \left(\begin{array}{cc}
    f \\ g
 \end{array}\right),
-\end{eqnarray*} +\end{eqnarray*}" src="form_1852.png"/>

    -

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

    +

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

    This is equivalent to the following two statements:

    -\begin{eqnarray*}
/usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html	2024-03-17 21:57:40.887218512 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html	2024-03-17 21:57:40.891218537 +0000
@@ -124,7 +124,7 @@
     w_q,
   \]

    - where $q$ indicates the index of the quadrature point, $\hat{\bf x}_q$ its location on the reference cell, and $w_q$ its weight. + where $q$ indicates the index of the quadrature point, $\hat{\bf x}_q$ its location on the reference cell, and $w_q$ its weight.

    In order to evaluate such an expression in an application code, we have to access three different kinds of objects: a quadrature object that describes locations $\hat{\bf x}_q$ and weights $w_q$ of quadrature points on the reference cell; a finite element object that describes the gradients $\hat\nabla \varphi_i(\hat{\bf x}_q)$ of shape functions on the unit cell; and a mapping object that provides the Jacobian as well as its determinant. Dealing with all these objects would be cumbersome and error prone.

    On the other hand, these three kinds of objects almost always appear together, and it is in fact very rare for deal.II application codes to do anything with quadrature, finite element, or mapping objects besides using them together. For this reason, deal.II uses the FEValues abstraction combining information on the shape functions, the geometry of the actual mesh cell and a quadrature rule on a reference cell. Upon construction it takes one object of each of the three mentioned categories. Later, it can be "re-initialized" for a concrete grid cell and then provides mapped quadrature points and weights, mapped shape function values and derivatives as well as some properties of the transformation from the reference cell to the actual mesh cell.

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 2024-03-17 21:57:40.931218784 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 2024-03-17 21:57:40.935218808 +0000 @@ -103,7 +103,7 @@ &#href_anchor"memitem:namespaceDifferentiation_1_1SD" id="r_namespaceDifferentiation_1_1SD">namespace  Differentiation::SD &#href_anchor"details" id="details">

    Detailed Description

    A module dedicated to the implementation of functions and classes that relate to automatic and symbolic differentiation.

    -

    Below we provide a very brief introduction as to what automatic and symbolic differentiation are, what variations of these computational/numerical schemes exist, and how they are integrated within deal.II's framework. The purpose of all of these schemes is to automatically compute the derivative of functions, or approximations of it, in cases where one does not want to compute them by hand. Common examples are situations in the finite element context is where one wants to solve a nonlinear problem that is given by requiring that some residual $F(u,\nabla u)=0$ where $F$ is a complicated function that needs to be differentiated to apply Newton's method; and situations where one is given a parameter dependent problem ${\cal A}(q,u,\nabla u) = f$ and wants to form derivatives with regards to the parameters $q$, for example to optimize an output functional with regards to $q$, or for a sensitivity analysis with regards to $q$. One should think of $q$ as design parameters: say, the width or shape of a wing, the stiffness coefficients of a material chosen to build an object, the power sent to a device, the chemical composition of the gases sent to a burner. In all of these cases, one should think of $F$ and $\cal A$ as complicated and cumbersome to differentiate – at least when doing it by hand. A relatively simple case of a nonlinear problem that already highlights the tedium of computing derivatives by hand is shown in step-15. However, in reality, one might, for example, think about problems such as chemically reactive flows where the fluid equations have coefficients such as the density and viscosity that depend strongly and nonlinearly on the chemical composition, temperature, and pressure of the fluid at each point; and where the chemical species react with each other based on reaction coefficients that also depend nonlinearly and in complicated ways on the chemical composition, temperature, and pressure. 
In many cases, the exact formulas for all of these coefficients can take several lines to write out, may include exponentials and (harmonic or geometric) averages of several nonlinear terms, and/or may contain table lookup of and interpolation between data points. Just getting these terms right is difficult enough; computing derivatives of these terms is impractical in most applications and, in reality, impossible to get right. Higher derivatives are even more impossible to do without computer aid. Automatic or symbolic differentiation is a way out of this: One only has to implement the function that computes these coefficients in terms of their inputs only once, and gets the (correct!) derivatives without further coding effort (though at a non-negligible computational cost either at run time, compile time, or both).

    +

    Below we provide a very brief introduction as to what automatic and symbolic differentiation are, what variations of these computational/numerical schemes exist, and how they are integrated within deal.II's framework. The purpose of all of these schemes is to automatically compute the derivative of functions, or approximations of it, in cases where one does not want to compute them by hand. Common examples are situations in the finite element context is where one wants to solve a nonlinear problem that is given by requiring that some residual $F(u,\nabla u)=0$ where $F$ is a complicated function that needs to be differentiated to apply Newton's method; and situations where one is given a parameter dependent problem ${\cal A}(q,u,\nabla u) = f$ and wants to form derivatives with regards to the parameters $q$, for example to optimize an output functional with regards to $q$, or for a sensitivity analysis with regards to $q$. One should think of $q$ as design parameters: say, the width or shape of a wing, the stiffness coefficients of a material chosen to build an object, the power sent to a device, the chemical composition of the gases sent to a burner. In all of these cases, one should think of $F$ and $\cal A$ as complicated and cumbersome to differentiate – at least when doing it by hand. A relatively simple case of a nonlinear problem that already highlights the tedium of computing derivatives by hand is shown in step-15. However, in reality, one might, for example, think about problems such as chemically reactive flows where the fluid equations have coefficients such as the density and viscosity that depend strongly and nonlinearly on the chemical composition, temperature, and pressure of the fluid at each point; and where the chemical species react with each other based on reaction coefficients that also depend nonlinearly and in complicated ways on the chemical composition, temperature, and pressure. 
In many cases, the exact formulas for all of these coefficients can take several lines to write out, may include exponentials and (harmonic or geometric) averages of several nonlinear terms, and/or may contain table lookup of and interpolation between data points. Just getting these terms right is difficult enough; computing derivatives of these terms is impractical in most applications and, in reality, impossible to get right. Higher derivatives are even more impossible to do without computer aid. Automatic or symbolic differentiation is a way out of this: One only has to implement the function that computes these coefficients in terms of their inputs only once, and gets the (correct!) derivatives without further coding effort (though at a non-negligible computational cost either at run time, compile time, or both).

    Automatic differentiation

    Automatic differentiation (commonly also referred to as algorithmic differentiation), is a numerical method that can be used to "automatically" compute the first, and perhaps higher-order, derivatives of function(s) with respect to one or more input variables. Although this comes at a certain computational cost, the benefits to using such a tool may be significant. When used correctly the derivatives of often complicated functions can be computed to a very high accuracy. Although the exact accuracy achievable by these frameworks largely depends on their underlying mathematical formulation, some implementations compute with a precision on the order of machine accuracy. Note that this is different to classical numerical differentiation (using, for example, a finite difference approximation of a function by evaluating it at different points), which has an accuracy that depends on both the perturbation size as well as the chosen finite-difference scheme; the error of these methods is measurably larger than well-formulated automatic differentiation approaches.

    @@ -151,38 +151,38 @@
  • reverse-mode (or reverse accumulation) auto-differentiation.
  • As a point of interest, the optimal Jacobian accumulation, which performs a minimal set of computations, lies somewhere between these two limiting cases. Its computation for a general composite function remains an open problem in graph theory.

    -

    With the aid of the diagram below (it and some of the listed details courtesy of this Wikipedia article), let us think about the representation of the calculation of the function $f (\mathbf{x}) = \sin (x_{1}) + x_{1} x_{2}$ and its derivatives:

    -
    Forward mode automatic differentiation
    Forward mode automatic differentiation
    Reverse mode automatic differentiation
    Reverse mode automatic differentiation

    Specifically, we will briefly describe what forward and reverse auto-differentiation are. Note that in the diagram, along the edges of the graph in text are the directional derivative of function $w$ with respect to the $i$-th variable, represented by the notation $\dot{w} = \dfrac{d w}{d x_{i}}$. The specific computations used to render the function value and its directional derivatives for this example are tabulated in the source article. For a second illustrative example, we refer the interested reader to this article.

    -

    Consider first that any composite function $f(x)$, here represented as having two independent variables, can be dissected into a composition of its elementary functions

    -\[
+<p>With the aid of the diagram below (it and some of the listed details courtesy of this <a href=Wikipedia article), let us think about the representation of the calculation of the function $f (\mathbf{x}) = \sin (x_{1}) + x_{1} x_{2}$ and its derivatives:

    +
    Forward mode automatic differentiation
    Forward mode automatic differentiation
    Reverse mode automatic differentiation
    Reverse mode automatic differentiation

    Specifically, we will briefly describe what forward and reverse auto-differentiation are. Note that in the diagram, along the edges of the graph in text are the directional derivative of function $w$ with respect to the $i$-th variable, represented by the notation $\dot{w} = \dfrac{d w}{d x_{i}}$. The specific computations used to render the function value and its directional derivatives for this example are tabulated in the source article. For a second illustrative example, we refer the interested reader to this article.

    +

    Consider first that any composite function $f(x)$, here represented as having two independent variables, can be dissected into a composition of its elementary functions

    +\[
   f (\mathbf{x})
   = f_{0} \circ f_{1} \circ f_{2} \circ \ldots \circ f_{n} (\mathbf{x})
   \quad .
-\] +\]" src="form_10.png"/>

    -

    As was previously mentioned, if each of the primitive operations $f_{n}$ is smooth and differentiable, then the chain-rule can be universally employed to compute the total derivative of $f$, namely $\dfrac{d f(x)}{d \mathbf{x}}$. What distinguishes the "forward" from the "reverse" mode is how the chain-rule is evaluated, but ultimately both compute the total derivative

    -\[
+<p> As was previously mentioned, if each of the primitive operations <picture><source srcset=$f_{n}$ is smooth and differentiable, then the chain-rule can be universally employed to compute the total derivative of $f$, namely $\dfrac{d f(x)}{d \mathbf{x}}$. What distinguishes the "forward" from the "reverse" mode is how the chain-rule is evaluated, but ultimately both compute the total derivative

    +\[
   \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \dfrac{d f_{0}}{d f_{1}} \dfrac{d f_{1}}{d f_{2}} \dfrac{d f_{2}}{d f_{3}} \ldots \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}}
   \quad .
-\] +\]" src="form_14.png"/>

    -

    In forward-mode, the chain-rule is computed naturally from the "inside out". The independent variables are therefore fixed, and each sub-function $f'_{i} \vert_{f'_{i+1}}$ is computed recursively and its result returned as inputs to the parent function. Encapsulating and fixing the order of operations using parentheses, this means that we compute

    -\[
+<p>In forward-mode, the chain-rule is computed naturally from the $f'_{i} \vert_{f'_{i+1}}$ is computed recursively and its result returned as inputs to the parent function. Encapsulating and fixing the order of operations using parentheses, this means that we compute

    +\[
   \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \dfrac{d f_{0}}{d f_{1}} \left( \dfrac{d f_{1}}{d f_{2}} \left(\dfrac{d f_{2}}{d f_{3}} \left(\ldots \left( \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}} \right)\right)\right)\right)
   \quad .
-\] +\]" src="form_16.png"/>

    The computational complexity of a forward-sweep is proportional to that of the input function. However, for each directional derivative that is to be computed one sweep of the computational graph is required.

    In reverse-mode, the chain-rule is computed somewhat unnaturally from the "outside in". The values of the dependent variables first get computed and fixed, and then the preceding differential operations are evaluated and multiplied in succession with the previous results from left to right. Again, if we encapsulate and fix the order of operations using parentheses, this implies that the reverse calculation is performed by

    -\[
+<picture><source srcset=\[
 \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \left( \left( \left( \left( \left( \dfrac{d f_{0}}{d f_{1}} \right) \dfrac{d f_{1}}{d f_{2}} \right) \dfrac{d f_{2}}{d f_{3}} \right) \ldots \right) \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}} \right)
   \quad .
-\] +\]" src="form_17.png"/>

    -

    The intermediate values $\dfrac{d f_{i-1}}{d f_{i}}$ are known as adjoints, which must be computed and stored as the computational graph is traversed. However, for each dependent scalar function one sweep of the computational graph renders all directional derivatives at once.

    +

    The intermediate values $\dfrac{d f_{i-1}}{d f_{i}}$ are known as adjoints, which must be computed and stored as the computational graph is traversed. However, for each dependent scalar function one sweep of the computational graph renders all directional derivatives at once.

    Overall, the efficiency of each mode is determined by the number of independent (input) variables and dependent (output) variables. If the outputs greatly exceed the inputs in number, then forward-mode can be shown to be more efficient than reverse-mode. The converse is true when the number of input variables greatly exceeds that of the output variables. This point may be used to help inform which number type is most suitable for which set of operations are to be performed using automatic differentiation. For example, in many applications for which second derivatives are to be computed it is appropriate to combine both reverse- and forward-modes. The former would then typically be used to calculate the first derivatives, and the latter the second derivatives.

    Supported automatic differentiation libraries

    @@ -330,7 +330,7 @@

    Symbolic expressions and differentiation

    Symbolic differentiation is, in terms of its design and usage, quite different to automatic differentiation. Underlying any symbolic library is a computer algebra system (CAS) that implements a language and collection of algorithms to manipulate symbolic (or "string-like") expressions. This is most similar, from a philosophical point of view, to how algebraic operations would be performed by hand.

    -

    To help better distinguish between symbolic differentiation and numerical methods like automatic differentiation, let's consider a very simple example. Suppose that the function $f(x,y) = [2x+1]^{y}$, where $x$ and $y$ are variables that are independent of one another. By applying the chain-rule, the derivatives of this function are simply $\dfrac{d f(x,y)}{d x} = 2y[2x+1]^{y-1}$ and $\dfrac{d f(x,y)}{d y} = [2x+1]^{y} \ln(2x+1)$. These are exactly the results that you get from a CAS after defining the symbolic variables x and y, defining the symbolic expression f = pow(2x+1, y) and computing the derivatives diff(f, x) and diff(f, y). At this point there is no assumption of what x and y represent; they may later be interpreted as plain (scalar) numbers, complex numbers, or something else for which the power and natural logarithm functions are well defined. Obviously this means that there is also no assumption about which point to evaluate either the expression or its derivatives. One could readily take the expression for $\dfrac{d f(x, y)}{d x}$ and evaluate it at $x=1, y=2.5$ and then later, with no recomputation of the derivative expression itself, evaluate it at $x=3.25, y=-6$. In fact, the interpretation of any symbolic variable or expression, and the inter-dependencies between variables, may be defined or redefined at any point during their manipulation; this leads to a degree of flexibility in computations that cannot be matched by auto-differentiation. For example, one could perform the permanent substitution $g(x) = \dfrac{d f(x, y)}{d x} \vert_{y=1}$ and then recompute $g(x)$ for several different values of $x$. One could also post-factum express an interdependency between x and y, such as $y \rightarrow y(x) := 2x$. 
For such a case, this means that the initially computed derivatives $\dfrac{d f(x, y)}{d x} \rightarrow \dfrac{\partial f(x, y(x))}{\partial x} = 2y(x) [2x+1]^{y(x)-1} = 4x[2x+1]^{2x-1}$ and $\dfrac{d f(x, y)}{d y} \rightarrow \dfrac{\partial f(x, y(x))}{\partial y} = [2x+1]^{y(x)} \ln(2x+1) = [2x+1]^{2x} \ln(2x+1)$ truly represent partial derivatives rather than total derivatives. Of course, if such an inter-dependency was explicitly defined before the derivatives $\dfrac{d f(x, y(x))}{d x}$ and $\dfrac{d f(x, y(x))}{d y}$ are computed, then this could correspond to the total derivative (which is the only result that auto-differentiation is able to achieve for this example).

    +

    To help better distinguish between symbolic differentiation and numerical methods like automatic differentiation, let's consider a very simple example. Suppose that the function $f(x,y) = [2x+1]^{y}$, where $x$ and $y$ are variables that are independent of one another. By applying the chain-rule, the derivatives of this function are simply $\dfrac{d f(x,y)}{d x} = 2y[2x+1]^{y-1}$ and $\dfrac{d f(x,y)}{d y} = [2x+1]^{y} \ln(2x+1)$. These are exactly the results that you get from a CAS after defining the symbolic variables x and y, defining the symbolic expression f = pow(2x+1, y) and computing the derivatives diff(f, x) and diff(f, y). At this point there is no assumption of what x and y represent; they may later be interpreted as plain (scalar) numbers, complex numbers, or something else for which the power and natural logarithm functions are well defined. Obviously this means that there is also no assumption about which point to evaluate either the expression or its derivatives. One could readily take the expression for $\dfrac{d f(x, y)}{d x}$ and evaluate it at $x=1, y=2.5$ and then later, with no recomputation of the derivative expression itself, evaluate it at $x=3.25, y=-6$. In fact, the interpretation of any symbolic variable or expression, and the inter-dependencies between variables, may be defined or redefined at any point during their manipulation; this leads to a degree of flexibility in computations that cannot be matched by auto-differentiation. For example, one could perform the permanent substitution $g(x) = \dfrac{d f(x, y)}{d x} \vert_{y=1}$ and then recompute $g(x)$ for several different values of $x$. One could also post-factum express an interdependency between x and y, such as $y \rightarrow y(x) := 2x$. 
For such a case, this means that the initially computed derivatives $\dfrac{d f(x, y)}{d x} \rightarrow \dfrac{\partial f(x, y(x))}{\partial x} = 2y(x) [2x+1]^{y(x)-1} = 4x[2x+1]^{2x-1}$ and $\dfrac{d f(x, y)}{d y} \rightarrow \dfrac{\partial f(x, y(x))}{\partial y} = [2x+1]^{y(x)} \ln(2x+1) = [2x+1]^{2x} \ln(2x+1)$ truly represent partial derivatives rather than total derivatives. Of course, if such an inter-dependency was explicitly defined before the derivatives $\dfrac{d f(x, y(x))}{d x}$ and $\dfrac{d f(x, y(x))}{d y}$ are computed, then this could correspond to the total derivative (which is the only result that auto-differentiation is able to achieve for this example).

    Due to the sophisticated CAS that forms the foundation of symbolic operations, the types of manipulations are not necessarily restricted to differentiation alone, but rather may span a spectrum of manipulations relevant to discrete differential calculus, topics in pure mathematics, and more. The documentation for the SymPy library gives plenty of examples that highlight what a fully-fledged CAS is capable of. Through the Differentiation::SD::Expression class, and the associated functions in the Differentiation::SD namespace, we provide a wrapper to the high-performance SymEngine symbolic manipulation library that has enriched operator overloading and a consistent interface that makes it easy and "natural" to use. In fact, this class can be used as a "drop-in" replacement for arithmetic types in many situations, transforming the operations from being numeric to symbolic in nature; this is made especially easy when classes are templated on the underlying number type. Being focused on numerical simulation of PDEs, the functionality of the CAS that is exposed within deal.II focuses on symbolic expression creation, manipulation, and differentiation.

    The convenience wrappers to SymEngine functionality are primarily focused on manipulations that solely involve dictionary-based (i.e., something reminiscent of "string-based") operations. Although SymEngine performs these operations in an efficient manner, they are still known to be computationally expensive, especially when the operations are performed on large expressions. It should therefore be expected that the performance of the parts of code that perform differentiation, symbolic substitution, etc., may be a limiting factor when using this in production code. deal.II therefore provides an interface to accelerate the evaluation of lengthy symbolic expression through the BatchOptimizer class (itself often leveraging functionality provided by SymEngine). In particular, the BatchOptimizer simultaneously optimizes a collection of symbolic expressions using methods such as common subexpression elimination (CSE), as well as by generating high performance code-paths to evaluate these expressions through the use of a custom-generated std::function or by compiling the expression using the LLVM JIT compiler. The usage of the Differentiation::SD::BatchOptimizer class is exemplified in step-71.

    As a final note, it is important to recognize the remaining major deficiencies in deal.II's current implementation of the interface to the supported symbolic library. The level of functionality currently implemented effectively limits the use of symbolic algebra to the traditional use case (i.e. scalar and tensor algebra, as might be useful to define constitutive relations or complex functions for application as boundary conditions or source terms). In fact, step-71 demonstrates how it can be used to implement challenging constitutive models. In the future we will also implement classes to assist in performing assembly operations in the same spirit as that which has been done in the Differentiation::AD namespace.

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html 2024-03-17 21:57:41.015219302 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html 2024-03-17 21:57:41.011219279 +0000 @@ -206,22 +206,22 @@

    Detailed Description

    This module deals with constraints on degrees of freedom. The central class to deal with constraints is the AffineConstraints class.

    Constraints typically come from several sources, for example:

    -

    In all of these examples, constraints on degrees of freedom are linear and possibly inhomogeneous. In other words, they always have the form $x_{i_1} = \sum_{j=2}^M a_{i_j} x_{i_j} + b_i$. The deal.II class that deals with storing and using these constraints is AffineConstraints.

    +

    In all of these examples, constraints on degrees of freedom are linear and possibly inhomogeneous. In other words, they always have the form $x_{i_1} = \sum_{j=2}^M a_{i_j} x_{i_j} + b_i$. The deal.II class that deals with storing and using these constraints is AffineConstraints.

    Eliminating constraints

    When building the global system matrix and the right hand sides, one can build them without taking care of the constraints, i.e. by simply looping over cells and adding the local contributions to the global matrix and right hand side objects. In order to do actual calculations, you have to 'condense' the linear system: eliminate constrained degrees of freedom and distribute the appropriate values to the unconstrained dofs. This changes the sparsity pattern of the sparse matrices used in finite element calculations and is thus a quite expensive operation. The general scheme of things is then that you build your system, you eliminate (condense) away constrained nodes using the AffineConstraints::condense() functions, then you solve the remaining system, and finally you compute the values of constrained nodes from the values of the unconstrained ones using the AffineConstraints::distribute() function. Note that the AffineConstraints::condense() function is applied to matrix and right hand side of the linear system, while the AffineConstraints::distribute() function is applied to the solution vector.

    This scheme of first building a linear system and then eliminating constrained degrees of freedom is inefficient, and a bottleneck if there are many constraints and matrices are full, i.e. especially for 3d and/or higher order or hp-finite elements. Furthermore, it is impossible to implement for parallel computations where a process may not have access to elements of the matrix. We therefore offer a second way of building linear systems, using the AffineConstraints::add_entries_local_to_global() and AffineConstraints::distribute_local_to_global() functions discussed below. The resulting linear systems are equivalent to those one gets after calling the AffineConstraints::condense() functions.

    @@ -284,7 +284,7 @@

    There are situations where degrees of freedom are constrained in more than one way, and sometimes in conflicting ways. Consider, for example the following situation:

    -

    Here, degree of freedom $x_0$ marked in blue is a hanging node. If we used trilinear finite elements, i.e. FE_Q(1), then it would carry the constraint $x_0=\frac 12 (x_{1}+x_{2})$. On the other hand, it is at the boundary, and if we have imposed boundary conditions $u|_{\partial\Omega}=g$ then we will have the constraint $x_0=g_0$ where $g_0$ is the value of the boundary function $g(\mathbf x)$ at the location of this degree of freedom.

    +

    Here, degree of freedom $x_0$ marked in blue is a hanging node. If we used trilinear finite elements, i.e. FE_Q(1), then it would carry the constraint $x_0=\frac 12 (x_{1}+x_{2})$. On the other hand, it is at the boundary, and if we have imposed boundary conditions $u|_{\partial\Omega}=g$ then we will have the constraint $x_0=g_0$ where $g_0$ is the value of the boundary function $g(\mathbf x)$ at the location of this degree of freedom.

    So, which one will win? Or maybe: which one should win? There is no good answer to this question:

    Examples:

    1. -

      If map["a"] == 1 and map["b"] == "a" + 2, then the function $f(a,b(a)) := a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is returned. This return is because the symbol "a" is substituted throughout the function first, and only then is the symbol "b(a)" substituted, by which time its explicit dependency on "a" cannot be resolved.

      +

      If map["a"] == 1 and map["b"] == "a" + 2, then the function $f(a,b(a)) := a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is returned. This return is because the symbol "a" is substituted throughout the function first, and only then is the symbol "b(a)" substituted, by which time its explicit dependency on "a" cannot be resolved.

    2. -If map["a"] == "b"+2 and map["b"] == 1, then the function $f(a(b),b): = a+b$ will be evaluated and the result $f\vert_{a=b+2, b} = [b+2+b]_{b=1} = 4$ is returned. This is because the explicitly dependent symbol "a(b)" is substituted first followed by the symbol "b".
    3. +If map["a"] == "b"+2 and map["b"] == 1, then the function $f(a(b),b): = a+b$ will be evaluated and the result $f\vert_{a=b+2, b} = [b+2+b]_{b=1} = 4$ is returned. This is because the explicitly dependent symbol "a(b)" is substituted first followed by the symbol "b".
    @@ -3862,7 +3862,7 @@

    Return a vector of Expressions representing a vectorial symbolic variable with the identifier specified by symbol.

    -

    For example, if the symbol is the string "v" then the vectorial symbolic variable that is returned represents the vector $v$. Each component of $v$ is prefixed by the given symbol, and has a suffix that indicates its component index.

    +

    For example, if the symbol is the string "v" then the vectorial symbolic variable that is returned represents the vector $v$. Each component of $v$ is prefixed by the given symbol, and has a suffix that indicates its component index.

    Template Parameters
    @@ -3899,7 +3899,7 @@
    dimThe dimension of the returned tensor.

    Return a tensor of Expressions representing a tensorial symbolic variable with the identifier specified by symbol.

    -

    For example, if the symbol is the string "T" then the tensorial symbolic variable that is returned represents the vector $T$. Each component of $T$ is prefixed by the given symbol, and has a suffix that indicates its component indices.

    +

    For example, if the symbol is the string "T" then the tensorial symbolic variable that is returned represents the vector $T$. Each component of $T$ is prefixed by the given symbol, and has a suffix that indicates its component indices.

    Template Parameters
    @@ -4102,7 +4102,7 @@
    rankThe rank of the returned tensor.
    -
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
    +
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
    @@ -4131,7 +4131,7 @@ -
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    +
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    @@ -4160,7 +4160,7 @@ -
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
    +
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
    @@ -4189,7 +4189,7 @@ -
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    +
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    @@ -4218,7 +4218,7 @@ -
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    +
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    @@ -4247,7 +4247,7 @@ -
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    +
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    @@ -4276,7 +4276,7 @@ -
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    +
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    @@ -4305,7 +4305,7 @@ -
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    +
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    @@ -4334,8 +4334,8 @@ -
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}_{1}}{\partial
-\mathbf{T}_{2}}$.
    +
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}_{1}}{\partial
+\mathbf{T}_{2}}$.
    @@ -4364,8 +4364,8 @@ -
    Returns
    The symmetric tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{S}_{1}}{\partial
-\mathbf{S}_{2}}$.
    +
    Returns
    The symmetric tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{S}_{1}}{\partial
+\mathbf{S}_{2}}$.
    @@ -4394,7 +4394,7 @@ -
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}}{\partial \mathbf{S}}$.
    +
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}}{\partial \mathbf{S}}$.
    @@ -4423,7 +4423,7 @@ -
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{S}}{\partial \mathbf{T}}$.
    +
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{S}}{\partial \mathbf{T}}$.
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html 2024-03-17 21:57:41.715223626 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html 2024-03-17 21:57:41.715223626 +0000 @@ -228,13 +228,13 @@

    Using the constraint information usually leads to reductions in bandwidth of 10 or 20 per cent, but may for some very unstructured grids also lead to an increase. You have to weigh the decrease in your case with the time spent to use the constraint information, which usually is several times longer than the ‘pure’ renumbering algorithm.

    In almost all cases, the renumbering scheme finds a corner to start with. Since there is more than one corner in most grids and since even an interior degree of freedom may be a better starting point, giving the starting point by the user may be a viable way if you have a simple scheme to derive a suitable point (e.g. by successively taking the third child of the cell top left of the coarsest level, taking its third vertex and the dof index thereof, if you want the top left corner vertex). If you do not know beforehand what your grid will look like (e.g. when using adaptive algorithms), searching a best starting point may be difficult, however, and in many cases will not justify the effort.

    Component-wise and block-wise numberings

    -

    For finite elements composed of several base elements using the FESystem class, or for elements which provide several components themselves, it may be of interest to sort the DoF indices by component. This will then bring out the block matrix structure, since otherwise the degrees of freedom are numbered cell-wise without taking into account that they may belong to different components. For example, one may want to sort degree of freedom for a Stokes discretization so that we first get all velocities and then all the pressures so that the resulting matrix naturally decomposes into a $2\times 2$ system.

    +

    For finite elements composed of several base elements using the FESystem class, or for elements which provide several components themselves, it may be of interest to sort the DoF indices by component. This will then bring out the block matrix structure, since otherwise the degrees of freedom are numbered cell-wise without taking into account that they may belong to different components. For example, one may want to sort degree of freedom for a Stokes discretization so that we first get all velocities and then all the pressures so that the resulting matrix naturally decomposes into a $2\times 2$ system.

    This kind of numbering may be obtained by calling the component_wise() function of this class. Since it does not touch the order of indices within each component, it may be worthwhile to first renumber using the Cuthill- McKee or a similar algorithm and afterwards renumbering component-wise. This will bring out the matrix structure and additionally have a good numbering within each block.

    The component_wise() function allows not only to honor enumeration based on vector components, but also allows to group together vector components into "blocks" using a defaulted argument to the various DoFRenumbering::component_wise() functions (see GlossComponent vs GlossBlock for a description of the difference). The blocks designated through this argument may, but do not have to be, equal to the blocks that the finite element reports. For example, a typical Stokes element would be

    FESystem<dim> stokes_fe (FE_Q<dim>(2), dim, // dim velocities
    FE_Q<dim>(1), 1); // one pressure
    Definition fe_system.h:209
    Definition fe_q.h:551
    -

    This element has dim+1 vector components and equally many blocks. However, one may want to consider the velocities as one logical block so that all velocity degrees of freedom are enumerated the same way, independent of whether they are $x$- or $y$-velocities. This is done, for example, in step-20 and step-22 as well as several other tutorial programs.

    +

    This element has dim+1 vector components and equally many blocks. However, one may want to consider the velocities as one logical block so that all velocity degrees of freedom are enumerated the same way, independent of whether they are $x$- or $y$-velocities. This is done, for example, in step-20 and step-22 as well as several other tutorial programs.

    On the other hand, if you really want to use block structure reported by the finite element itself (a case that is often the case if you have finite elements that have multiple vector components, e.g. the FE_RaviartThomas or FE_Nedelec elements) then you can use the DoFRenumbering::block_wise instead of the DoFRenumbering::component_wise functions.

    Cell-wise numbering

    Given an ordered vector of cells, the function cell_wise() sorts the degrees of freedom such that degrees on earlier cells of this vector will occur before degrees on later cells.

    @@ -247,7 +247,7 @@

    The MatrixFree class provides optimized algorithms for interleaving operations on vectors before and after the access of the vector data in the respective loops. The algorithm matrix_free_data_locality() makes sure that all unknowns with a short distance between the first and last access are grouped together, in order to increase the spatial data locality.

    A comparison of reordering strategies

    As a benchmark of comparison, let us consider what the different sparsity patterns produced by the various algorithms when using the $Q_2^d\times
-Q_1$ element combination typically employed in the discretization of Stokes equations, when used on the mesh obtained in step-22 after one adaptive mesh refinement in 3d. The space dimension together with the coupled finite element leads to a rather dense system matrix with, on average around 180 nonzero entries per row. After applying each of the reordering strategies shown below, the degrees of freedom are also sorted using DoFRenumbering::component_wise into velocity and pressure groups; this produces the $2\times 2$ block structure seen below with the large velocity-velocity block at top left, small pressure-pressure block at bottom right, and coupling blocks at top right and bottom left.

    +Q_1$" src="form_951.png"/> element combination typically employed in the discretization of Stokes equations, when used on the mesh obtained in step-22 after one adaptive mesh refinement in 3d. The space dimension together with the coupled finite element leads to a rather dense system matrix with, on average around 180 nonzero entries per row. After applying each of the reordering strategies shown below, the degrees of freedom are also sorted using DoFRenumbering::component_wise into velocity and pressure groups; this produces the $2\times 2$ block structure seen below with the large velocity-velocity block at top left, small pressure-pressure block at bottom right, and coupling blocks at top right and bottom left.

    The goal of reordering strategies is to improve the preconditioner. In step-22 we use a SparseILU to preconditioner for the velocity-velocity block at the top left. The quality of the preconditioner can then be measured by the number of CG iterations required to solve a linear system with this block. For some of the reordering strategies below we record this number for adaptive refinement cycle 3, with 93176 degrees of freedom; because we solve several linear systems with the same matrix in the Schur complement, the average number of iterations is reported. The lower the number the better the preconditioner and consequently the better the renumbering of degrees of freedom is suited for this task. We also state the run-time of the program, in part determined by the number of iterations needed, for the first 4 cycles on one of our machines. Note that the reported times correspond to the run time of the entire program, not just the affected solver; if a program runs twice as fast with one particular ordering than with another one, then this means that the actual solver is actually several times faster.

    @@ -459,7 +459,7 @@
    -

    Sort the degrees of freedom by vector component. The numbering within each component is not touched, so a degree of freedom with index $i$, belonging to some component, and another degree of freedom with index $j$ belonging to the same component will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    +

    Sort the degrees of freedom by vector component. The numbering within each component is not touched, so a degree of freedom with index $i$, belonging to some component, and another degree of freedom with index $j$ belonging to the same component will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    You can specify that the components are ordered in a different way than suggested by the FESystem object you use. To this end, set up the vector target_component such that the entry at index i denotes the number of the target component for dofs with component i in the FESystem. Naming the same target component more than once is possible and results in a blocking of several components into one. This is discussed in step-22. If you omit this argument, the same order as given by the finite element is used.

    If one of the base finite elements from which the global finite element under consideration here, is a non-primitive one, i.e. its shape functions have more than one non-zero component, then it is not possible to associate these degrees of freedom with a single vector component. In this case, they are associated with the first vector component to which they belong.

    For finite elements with only one component, or a single non-primitive base element, this function is the identity operation.

    @@ -553,7 +553,7 @@
    -

    Sort the degrees of freedom by vector block. The numbering within each block is not touched, so a degree of freedom with index $i$, belonging to some block, and another degree of freedom with index $j$ belonging to the same block will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    +

    Sort the degrees of freedom by vector block. The numbering within each block is not touched, so a degree of freedom with index $i$, belonging to some block, and another degree of freedom with index $j$ belonging to the same block will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    Note
    This function only succeeds if each of the elements in the hp::FECollection attached to the DoFHandler argument has exactly the same number of blocks (see the glossary for more information). Note that this is not always given: while the hp::FECollection class ensures that all of its elements have the same number of vector components, they need not have the same number of blocks. At the same time, this function here needs to match individual blocks across elements and therefore requires that elements have the same number of blocks and that subsequent blocks in one element have the same meaning as in another element.

    Definition at line 999 of file dof_renumbering.cc.

    @@ -638,7 +638,7 @@
  • For meshes based on parallel::distributed::Triangulation, the locally owned cells of each MPI process are contiguous in Z order. That means that numbering degrees of freedom by visiting cells in Z order yields locally owned DoF indices that consist of contiguous ranges for each process. This is also true for the default ordering of DoFs on such triangulations, but the default ordering creates an enumeration that also depends on how many processors participate in the mesh, whereas the one generated by this function enumerates the degrees of freedom on a particular cell with indices that will be the same regardless of how many processes the mesh is split up between.
  • For meshes based on parallel::shared::Triangulation, the situation is more complex. Here, the set of locally owned cells is determined by a partitioning algorithm (selected by passing an object of type parallel::shared::Triangulation::Settings to the constructor of the triangulation), and in general these partitioning algorithms may assign cells to subdomains based on decisions that may have nothing to do with the Z order. (Though it is possible to select these flags in a way so that partitioning uses the Z order.) As a consequence, the cells of one subdomain are not contiguous in Z order, and if one renumbered degrees of freedom based on the Z order of cells, one would generally end up with DoF indices that on each processor do not form a contiguous range. This is often inconvenient (for example, because PETSc cannot store vectors and matrices for which the locally owned set of indices is not contiguous), and consequently this function uses the following algorithm for parallel::shared::Triangulation objects:

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 2024-03-17 21:57:41.799224145 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 2024-03-17 21:57:41.803224170 +0000 @@ -298,7 +298,7 @@

    Detailed Description

    This is a collection of functions operating on, and manipulating the numbers of degrees of freedom. The documentation of the member functions will provide more information, but for functions that exist in multiple versions, there are sections in this global documentation stating some commonalities.

    Setting up sparsity patterns

    -

    When assembling system matrices, the entries are usually of the form $a_{ij} = a(\phi_i, \phi_j)$, where $a$ is a bilinear functional, often an integral. When using sparse matrices, we therefore only need to reserve space for those $a_{ij}$ only, which are nonzero, which is the same as to say that the basis functions $\phi_i$ and $\phi_j$ have a nonempty intersection of their support. Since the support of basis functions is bound only on cells on which they are located or to which they are adjacent, to determine the sparsity pattern it is sufficient to loop over all cells and connect all basis functions on each cell with all other basis functions on that cell. There may be finite elements for which not all basis functions on a cell connect with each other, but no use of this case is made since no examples where this occurs are known to the author.

    +

    When assembling system matrices, the entries are usually of the form $a_{ij} = a(\phi_i, \phi_j)$, where $a$ is a bilinear functional, often an integral. When using sparse matrices, we therefore only need to reserve space for those $a_{ij}$ only, which are nonzero, which is the same as to say that the basis functions $\phi_i$ and $\phi_j$ have a nonempty intersection of their support. Since the support of basis functions is bound only on cells on which they are located or to which they are adjacent, to determine the sparsity pattern it is sufficient to loop over all cells and connect all basis functions on each cell with all other basis functions on that cell. There may be finite elements for which not all basis functions on a cell connect with each other, but no use of this case is made since no examples where this occurs are known to the author.

    DoF numberings on boundaries

    When projecting the traces of functions to the boundary or parts thereof, one needs to build matrices and vectors that act only on those degrees of freedom that are located on the boundary, rather than on all degrees of freedom. One could do that by simply building matrices in which the entries for all interior DoFs are zero, but such matrices are always very rank deficient and not very practical to work with.

    What is needed instead in this case is a numbering of the boundary degrees of freedom, i.e. we should enumerate all the degrees of freedom that are sitting on the boundary, and exclude all other (interior) degrees of freedom. The map_dof_to_boundary_indices() function does exactly this: it provides a vector with as many entries as there are degrees of freedom on the whole domain, with each entry being the number in the numbering of the boundary or numbers::invalid_dof_index if the dof is not on the boundary.

    @@ -308,7 +308,7 @@

    (As a side note, for corner cases: The question what a degree of freedom on the boundary is, is not so easy. It should really be a degree of freedom of which the respective basis function has nonzero values on the boundary. At least for Lagrange elements this definition is equal to the statement that the off-point, or what deal.II calls support_point, of the shape function, i.e. the point where the function assumes its nominal value (for Lagrange elements this is the point where it has the function value 1), is located on the boundary. We do not check this directly, the criterion is rather defined through the information the finite element class gives: the FiniteElement class defines the numbers of basis functions per vertex, per line, and so on and the basis functions are numbered after this information; a basis function is to be considered to be on the face of a cell (and thus on the boundary if the cell is at the boundary) according to it belonging to a vertex, line, etc but not to the interior of the cell. The finite element uses the same cell-wise numbering so that we can say that if a degree of freedom was numbered as one of the dofs on lines, we assume that it is located on the line. Where the off-point actually is, is a secret of the finite element (well, you can ask it, but we don't do it here) and not relevant in this context.)

    Setting up sparsity patterns for boundary matrices

    In some cases, one wants to only work with DoFs that sit on the boundary. One application is, for example, if rather than interpolating non- homogeneous boundary values, one would like to project them. For this, we need two things: a way to identify nodes that are located on (parts of) the boundary, and a way to build matrices out of only degrees of freedom that are on the boundary (i.e. much smaller matrices, in which we do not even build the large zero block that stems from the fact that most degrees of freedom have no support on the boundary of the domain). The first of these tasks is done by the map_dof_to_boundary_indices() function (described above).

    -

    The second part requires us first to build a sparsity pattern for the couplings between boundary nodes, and then to actually build the components of this matrix. While actually computing the entries of these small boundary matrices is discussed in the MatrixCreator namespace, the creation of the sparsity pattern is done by the create_boundary_sparsity_pattern() function. For its work, it needs to have a numbering of all those degrees of freedom that are on those parts of the boundary that we are interested in. You can get this from the map_dof_to_boundary_indices() function. It then builds the sparsity pattern corresponding to integrals like $\int_\Gamma \varphi_{b2d(i)} \varphi_{b2d(j)} dx$, where $i$ and $j$ are indices into the matrix, and $b2d(i)$ is the global DoF number of a degree of freedom sitting on a boundary (i.e., $b2d$ is the inverse of the mapping returned by map_dof_to_boundary_indices() function).

    +

    The second part requires us first to build a sparsity pattern for the couplings between boundary nodes, and then to actually build the components of this matrix. While actually computing the entries of these small boundary matrices is discussed in the MatrixCreator namespace, the creation of the sparsity pattern is done by the create_boundary_sparsity_pattern() function. For its work, it needs to have a numbering of all those degrees of freedom that are on those parts of the boundary that we are interested in. You can get this from the map_dof_to_boundary_indices() function. It then builds the sparsity pattern corresponding to integrals like $\int_\Gamma \varphi_{b2d(i)} \varphi_{b2d(j)} dx$, where $i$ and $j$ are indices into the matrix, and $b2d(i)$ is the global DoF number of a degree of freedom sitting on a boundary (i.e., $b2d$ is the inverse of the mapping returned by map_dof_to_boundary_indices() function).

    Enumeration Type Documentation

    ◆ Coupling

    @@ -488,7 +488,7 @@

    Otherwise, if face_1 and face_2 are not active faces, this function loops recursively over the children of face_1 and face_2. If only one of the two faces is active, then we recursively iterate over the children of the non-active ones and make sure that the solution function on the refined side equals that on the non-refined face in much the same way as we enforce hanging node constraints at places where differently refined cells come together. (However, unlike hanging nodes, we do not enforce the requirement that there be only a difference of one refinement level between the two sides of the domain you would like to be periodic).

    This routine only constrains DoFs that are not already constrained. If this routine encounters a DoF that already is constrained (for instance by Dirichlet boundary conditions), the old setting of the constraint (dofs the entry is constrained to, inhomogeneities) is kept and nothing happens.

    The flags in the component_mask (see GlossComponentMask) denote which components of the finite element space shall be constrained with periodic boundary conditions. If it is left as specified by the default value all components are constrained. If it is different from the default value, it is assumed that the number of entries equals the number of components of the finite element. This can be used to enforce periodicity in only one variable in a system of equations.

    -

    face_orientation, face_flip and face_rotation describe an orientation that should be applied to face_1 prior to matching and constraining DoFs. This has nothing to do with the actual orientation of the given faces in their respective cells (which for boundary faces is always the default) but instead how you want to see periodicity to be enforced. For example, by using these flags, you can enforce a condition of the kind $u(0,y)=u(1,1-y)$ (i.e., a Moebius band) or in 3d a twisted torus. More precisely, these flags match local face DoF indices in the following manner:

    +

    face_orientation, face_flip and face_rotation describe an orientation that should be applied to face_1 prior to matching and constraining DoFs. This has nothing to do with the actual orientation of the given faces in their respective cells (which for boundary faces is always the default) but instead how you want to see periodicity to be enforced. For example, by using these flags, you can enforce a condition of the kind $u(0,y)=u(1,1-y)$ (i.e., a Moebius band) or in 3d a twisted torus. More precisely, these flags match local face DoF indices in the following manner:

    In 2d: face_orientation must always be true, face_rotation is always false, and face_flip has the meaning of line_flip; this implies e.g. for Q1:

    face_orientation = true, face_flip = false, face_rotation = false:
    @@ -561,7 +561,7 @@
    and any combination of that...

    Optionally a matrix matrix along with a std::vector first_vector_components can be specified that describes how DoFs on face_1 should be modified prior to constraining to the DoFs of face_2. Here, two declarations are possible: If the std::vector first_vector_components is non empty the matrix is interpreted as a dim $\times$ dim rotation matrix that is applied to all vector valued blocks listed in first_vector_components of the FESystem. If first_vector_components is empty the matrix is interpreted as an interpolation matrix with size no_face_dofs $\times$ no_face_dofs.

    This function makes sure that identity constraints don't create cycles in constraints.

    -

    periodicity_factor can be used to implement Bloch periodic conditions (a.k.a. phase shift periodic conditions) of the form $\psi(\mathbf{r})=e^{-i\mathbf{k}\cdot\mathbf{r}}u(\mathbf{r})$ where $u$ is periodic with the same periodicity as the crystal lattice and $\mathbf{k}$ is the wavevector, see https://en.wikipedia.org/wiki/Bloch_wave. The solution at face_2 is equal to the solution at face_1 times periodicity_factor. For example, if the solution at face_1 is $\psi(0)$ and $\mathbf{d}$ is the corresponding point on face_2, then the solution at face_2 should be $\psi(d) = \psi(0)e^{-i \mathbf{k}\cdot \mathbf{d}}$. This condition can be implemented using $\mathrm{periodicity\_factor}=e^{-i \mathbf{k}\cdot \mathbf{d}}$.

    +

    periodicity_factor can be used to implement Bloch periodic conditions (a.k.a. phase shift periodic conditions) of the form $\psi(\mathbf{r})=e^{-i\mathbf{k}\cdot\mathbf{r}}u(\mathbf{r})$ where $u$ is periodic with the same periodicity as the crystal lattice and $\mathbf{k}$ is the wavevector, see https://en.wikipedia.org/wiki/Bloch_wave. The solution at face_2 is equal to the solution at face_1 times periodicity_factor. For example, if the solution at face_1 is $\psi(0)$ and $\mathbf{d}$ is the corresponding point on face_2, then the solution at face_2 should be $\psi(d) = \psi(0)e^{-i \mathbf{k}\cdot \mathbf{d}}$. This condition can be implemented using $\mathrm{periodicity\_factor}=e^{-i \mathbf{k}\cdot \mathbf{d}}$.

    Detailed information can be found in the see Glossary entry on periodic boundary conditions.

    Definition at line 2292 of file dof_tools_constraints.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 2024-03-17 21:57:41.839224392 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 2024-03-17 21:57:41.839224392 +0000 @@ -165,7 +165,7 @@
    -

    Linear regression least-square fit of $y = k \, x + b$. The size of the input vectors should be equal and more than 1. The returned pair will contain $k$ (first) and $b$ (second).

    +

    Linear regression least-square fit of $y = k \, x + b$. The size of the input vectors should be equal and more than 1. The returned pair will contain $k$ (first) and $b$ (second).

    Definition at line 30 of file fe_series.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 2024-03-17 21:57:41.891224713 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 2024-03-17 21:57:41.891224713 +0000 @@ -188,7 +188,7 @@ void&#href_anchor"memTemplItemRight" valign="bottom">extrapolate (const DoFHandler< dim, spacedim > &dof1, const InVector &z1, const DoFHandler< dim, spacedim > &dof2, const AffineConstraints< typename OutVector::value_type > &constraints, OutVector &z2) &#href_anchor"details" id="details">

    Detailed Description

    This namespace offers interpolations and extrapolations of discrete functions of one FiniteElement fe1 to another FiniteElement fe2.

    -

    It also provides the local interpolation matrices that interpolate on each cell. Furthermore it provides the difference matrix $id-I_h$ that is needed for evaluating $(id-I_h)z$ for e.g. the dual solution $z$.

    +

    It also provides the local interpolation matrices that interpolate on each cell. Furthermore it provides the difference matrix $id-I_h$ that is needed for evaluating $(id-I_h)z$ for e.g. the dual solution $z$.

    For more information about the spacedim template parameter check the documentation of FiniteElement or the one of Triangulation.

    Function Documentation

    @@ -330,7 +330,7 @@ FullMatrix< number > & difference_matrix&#href_anchor"memdoc">

    Compute the identity matrix minus the back interpolation matrix. The difference_matrix will be of size (fe1.n_dofs_per_cell(), fe1.n_dofs_per_cell()) after this function. Previous content of the argument will be overwritten.

    -

    This function computes the matrix that transforms a fe1 function $z$ to $z-I_hz$ where $I_h$ denotes the interpolation operator from the fe1 space to the fe2 space. This matrix hence is useful to evaluate error-representations where $z$ denotes the dual solution.

    +

    This function computes the matrix that transforms a fe1 function $z$ to $z-I_hz$ where $I_h$ denotes the interpolation operator from the fe1 space to the fe2 space. This matrix hence is useful to evaluate error-representations where $z$ denotes the dual solution.

    @@ -377,20 +377,20 @@

    This is a rather specialized function used during the construction of finite element objects. It is used to build the basis of shape functions for an element, given a set of polynomials and interpolation points. The function is only implemented for finite elements with exactly dim vector components. In particular, this applies to classes derived from the FE_PolyTensor class.

    -

    Specifically, the purpose of this function is as follows: FE_PolyTensor receives, from its derived classes, an argument that describes a polynomial space. This space may be parameterized in terms of monomials, or in some other way, but is in general not in the form that we use for finite elements where we typically want to use a basis that is derived from some kind of node functional (e.g., the interpolation at specific points). Concretely, assume that the basis used by the polynomial space is $\{\tilde\varphi_j(\mathbf x)\}_{j=1}^N$, and that the node functionals of the finite element are $\{\Psi_i\}_{i=1}^N$. We then want to compute a basis $\{\varphi_j(\mathbf x)\}_{j=1}^N$ for the finite element space so that $\Psi_i[\varphi_j] = \delta_{ij}$. To do this, we can set $\varphi_j(\mathbf x) = \sum_{k=1}^N c_{jk} \tilde\varphi_k(\mathbf x)$ where we need to determine the expansion coefficients $c_{jk}$. We do this by applying $\Psi_i$ to both sides of the equation, to obtain

    +

    Specifically, the purpose of this function is as follows: FE_PolyTensor receives, from its derived classes, an argument that describes a polynomial space. This space may be parameterized in terms of monomials, or in some other way, but is in general not in the form that we use for finite elements where we typically want to use a basis that is derived from some kind of node functional (e.g., the interpolation at specific points). Concretely, assume that the basis used by the polynomial space is $\{\tilde\varphi_j(\mathbf x)\}_{j=1}^N$, and that the node functionals of the finite element are $\{\Psi_i\}_{i=1}^N$. We then want to compute a basis $\{\varphi_j(\mathbf x)\}_{j=1}^N$ for the finite element space so that $\Psi_i[\varphi_j] = \delta_{ij}$. To do this, we can set $\varphi_j(\mathbf x) = \sum_{k=1}^N c_{jk} \tilde\varphi_k(\mathbf x)$ where we need to determine the expansion coefficients $c_{jk}$. We do this by applying $\Psi_i$ to both sides of the equation, to obtain

    \begin{align*}
   \Psi_i [\varphi_j] = \sum_{k=1}^N c_{jk} \Psi_i[\tilde\varphi_k],
 \end{align*}

    -

    and we know that the left hand side equals $\delta_{ij}$. If you think of this as a system of $N\times N$ equations for the elements of a matrix on the left and on the right, then this can be written as

    +

    and we know that the left hand side equals $\delta_{ij}$. If you think of this as a system of $N\times N$ equations for the elements of a matrix on the left and on the right, then this can be written as

    \begin{align*}
   I = C X^T
 \end{align*}

    -

    where $C$ is the matrix of coefficients $c_{jk}$ and $X_{ik} = \Psi_i[\tilde\varphi_k]$. Consequently, in order to compute the expansion coefficients $C=X^{-T}$, we need to apply the node functionals to all functions of the "raw" basis of the polynomial space.

    -

    Until the finite element receives this matrix $X$ back, it describes its shape functions (e.g., in FiniteElement::shape_value()) in the form $\tilde\varphi_j$. After it calls this function, it has the expansion coefficients and can describe its shape functions as $\varphi_j$.

    +

    where $C$ is the matrix of coefficients $c_{jk}$ and $X_{ik} = \Psi_i[\tilde\varphi_k]$. Consequently, in order to compute the expansion coefficients $C=X^{-T}$, we need to apply the node functionals to all functions of the "raw" basis of the polynomial space.

    +

    Until the finite element receives this matrix $X$ back, it describes its shape functions (e.g., in FiniteElement::shape_value()) in the form $\tilde\varphi_j$. After it calls this function, it has the expansion coefficients and can describe its shape functions as $\varphi_j$.

    This function therefore computes this matrix $X$, for the following specific circumstances:

    @@ -898,7 +898,7 @@ OutVector & z1_difference&#href_anchor"memdoc"> -

    Compute $(Id-I_h)z_1$ for a given dof1-function $z_1$, where $I_h$ is the interpolation from fe1 to fe2. The result $(Id-I_h)z_1$ is written into z1_difference.

    +

    Compute $(Id-I_h)z_1$ for a given dof1-function $z_1$, where $I_h$ is the interpolation from fe1 to fe2. The result $(Id-I_h)z_1$ is written into z1_difference.

    Note, that this function does not work for continuous elements at hanging nodes. For that case use the interpolation_difference function, below, that takes an additional AffineConstraints object.

    @@ -940,7 +940,7 @@ OutVector & z1_difference&#href_anchor"memdoc"> -

    Compute $(Id-I_h)z_1$ for a given dof1-function $z_1$, where $I_h$ is the interpolation from fe1 to fe2. The result $(Id-I_h)z_1$ is written into z1_difference. constraints1 and constraints2 are the hanging node constraints corresponding to dof1 and dof2, respectively. These objects are particular important when continuous elements on grids with hanging nodes (locally refined grids) are involved.

    +

    Compute $(Id-I_h)z_1$ for a given dof1-function $z_1$, where $I_h$ is the interpolation from fe1 to fe2. The result $(Id-I_h)z_1$ is written into z1_difference. constraints1 and constraints2 are the hanging node constraints corresponding to dof1 and dof2, respectively. These objects are particular important when continuous elements on grids with hanging nodes (locally refined grids) are involved.

    For parallel computations, supply z1 with ghost elements and z1_difference without ghost elements.

    @@ -1011,7 +1011,7 @@
  • It then performs a loop over all non-active cells of dof2. If such a non-active cell has at least one active child, then we call the children of this cell a "patch". We then interpolate from the children of this patch to the patch, using the finite element space associated with dof2 and immediately interpolate back to the children. In essence, this information throws away all information in the solution vector that lives on a scale smaller than the patch cell.
  • Since we traverse non-active cells from the coarsest to the finest levels, we may find patches that correspond to child cells of previously treated patches if the mesh had been refined adaptively (this cannot happen if the mesh has been refined globally because there the children of a patch are all active). We also perform the operation described above on these patches, but it is easy to see that on patches that are children of previously treated patches, the operation is now the identity operation (since it interpolates from the children of the current patch a function that had previously been interpolated to these children from an even coarser patch). Consequently, this does not alter the solution vector any more.
  • -

    The name of the function originates from the fact that it can be used to construct a representation of a function of higher polynomial degree on a once coarser mesh. For example, if you imagine that you start with a $Q_1$ function on a globally refined mesh, and that dof2 is associated with a $Q_2$ element, then this function computes the equivalent of the operator $I_{2h}^{(2)}$ interpolating the original piecewise linear function onto a quadratic function on a once coarser mesh with mesh size $2h$ (but representing this function on the original mesh with size $h$). If the exact solution is sufficiently smooth, then $u^\ast=I_{2h}^{(2)}u_h$ is typically a better approximation to the exact solution $u$ of the PDE than $u_h$ is. In other words, this function provides a postprocessing step that improves the solution in a similar way one often obtains by extrapolating a sequence of solutions, explaining the origin of the function's name.

    +

    The name of the function originates from the fact that it can be used to construct a representation of a function of higher polynomial degree on a once coarser mesh. For example, if you imagine that you start with a $Q_1$ function on a globally refined mesh, and that dof2 is associated with a $Q_2$ element, then this function computes the equivalent of the operator $I_{2h}^{(2)}$ interpolating the original piecewise linear function onto a quadratic function on a once coarser mesh with mesh size $2h$ (but representing this function on the original mesh with size $h$). If the exact solution is sufficiently smooth, then $u^\ast=I_{2h}^{(2)}u_h$ is typically a better approximation to the exact solution $u$ of the PDE than $u_h$ is. In other words, this function provides a postprocessing step that improves the solution in a similar way one often obtains by extrapolating a sequence of solutions, explaining the origin of the function's name.

    Note
    The resulting field does not satisfy continuity requirements of the given finite elements if the algorithm outlined above is used. When you use continuous elements on grids with hanging nodes, please use the extrapolate function with an additional AffineConstraints argument, see below.
    Since this function operates on patches of cells, it requires that the underlying grid is refined at least once for every coarse grid cell. If this is not the case, an exception will be raised.
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html 2024-03-17 21:57:41.927224936 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html 2024-03-17 21:57:41.931224960 +0000 @@ -127,13 +127,13 @@
    1. Tensor product construction (do_tensor_product=true): The tensor product construction, in the simplest case, builds a vector-valued element from scalar elements (see this documentation module and this glossary entry for more information). To give an example, consider creating a vector-valued element with two vector components, where the first should have linear shape functions and the second quadratic shape functions. In 1d, the shape functions (on the reference cell) of the base elements are then

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   Q_1 &= \{ 1-x, x \},
   \\  Q_2 &= \{ 2(\frac 12 - x)(1-x), 2(x - \frac 12)x, 4x(1-x) \},
-\end{align*} +\end{align*}" src="form_1232.png"/>

      where shape functions are ordered in the usual way (first on the first vertex, then on the second vertex, then in the interior of the cell). The tensor product construction will create an element with the following shape functions:

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   Q_1 \times Q_2 &=
   \left\{
     \begin{pmatrix} 1-x \\ 0 \end{pmatrix},
@@ -142,7 +142,7 @@
     \begin{pmatrix} 0 \\ 2(x - \frac 12)x \end{pmatrix},
     \begin{pmatrix} 0 \\ 4x(1-x) \end{pmatrix}
   \right\}.
-\end{align*} +\end{align*}" src="form_1233.png"/>

      The list here is again in standard order.

      Of course, the procedure also works if the base elements are already vector valued themselves: in that case, the composed element simply has as many vector components as the base elements taken together.

      @@ -150,10 +150,10 @@
    2. Combining shape functions (do_tensor_product=false): In contrast to the previous strategy, combining shape functions simply takes all of the shape functions together. In the case above, this would yield the following element:

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   Q_1 + Q_2 &= \{ 1-x, 2(\frac 12 - x)(1-x),
                   x, 2(x - \frac 12)x, 4x(1-x) \}.
-\end{align*} +\end{align*}" src="form_1234.png"/>

      In other words, if the base elements are scalar, the resulting element will also be. In general, the base elements all will have to have the same number of vector components.

      The element constructed above of course no longer has a linearly independent set of shape functions. As a consequence, any matrix one creates by treating all shape functions of the composed element in the same way will be singular. In practice, this strategy is therefore typically used in situations where one explicitly makes sure that certain shape functions are treated differently (e.g., by multiplying them with weight functions), or in cases where the shape functions one combines are not linearly dependent.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 2024-03-17 21:57:41.951225084 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 2024-03-17 21:57:41.955225108 +0000 @@ -130,13 +130,13 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

      Estimate bounds on the value and bounds on each gradient component of a Function, $f$, over a BoundingBox, by approximating it by a 2nd order Taylor polynomial starting from the box center.

      -

      Each lower and upper bound is returned as a std::pair<double, double>, such that the first entry is the lower bound, $L$, and the second is the upper bound, $U$, i.e. $f(x) \in [L, U]$.

      +

      Estimate bounds on the value and bounds on each gradient component of a Function, $f$, over a BoundingBox, by approximating it by a 2nd order Taylor polynomial starting from the box center.

      +

      Each lower and upper bound is returned as a std::pair<double, double>, such that the first entry is the lower bound, $L$, and the second is the upper bound, $U$, i.e. $f(x) \in [L, U]$.

      The function value, gradient, and Hessian are computed at the box center. The bounds on the value of the function are then estimated as

      -

      $f(x) \in [f(x_c) - F, f(x_c) + F]$, where $F = \sum_i |\partial_i f(x_c)| h_i
-   + 1/2 \sum_i \sum_j |\partial_i \partial_j f(x_c)| h_i h_j$.

      -

      Here, $h_i$ is half the side length of the box in the $i$th coordinate direction, which is the distance we extrapolate. The bounds on the gradient components are estimated similarly as

      -

      $\partial_i f \in [\partial_i f(x_c) - G_i, \partial_i f(x_c) + G_i]$, where $G_i = \sum_j |\partial_i \partial_j f(x_c)| h_j$.

      +

      $f(x) \in [f(x_c) - F, f(x_c) + F]$, where $F = \sum_i |\partial_i f(x_c)| h_i
+   + 1/2 \sum_i \sum_j |\partial_i \partial_j f(x_c)| h_i h_j$.

      +

      Here, $h_i$ is half the side length of the box in the $i$th coordinate direction, which is the distance we extrapolate. The bounds on the gradient components are estimated similarly as

      +

      $\partial_i f \in [\partial_i f(x_c) - G_i, \partial_i f(x_c) + G_i]$, where $G_i = \sum_j |\partial_i \partial_j f(x_c)| h_j$.

      If the function has more than 1 component the component parameter can be used to specify which function component the bounds should be computed for.

      Definition at line 26 of file function_tools.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html 2024-03-17 21:57:41.979225257 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html 2024-03-17 21:57:41.979225257 +0000 @@ -135,9 +135,9 @@

      Create a partitioning of the given range of iterators so that iterators that point to conflicting objects will be placed into different partitions, where the question whether two objects conflict is determined by a user-provided function.

      This function can also be considered as a graph coloring: each object pointed to by an iterator is considered to be a node and there is an edge between each two nodes that conflict. The graph coloring algorithm then assigns a color to each node in such a way that two nodes connected by an edge do not have the same color.

      A typical use case for this function is in assembling a matrix in parallel. There, one would like to assemble local contributions on different cells at the same time (an operation that is purely local and so requires no synchronization) but then we need to add these local contributions to the global matrix. In general, the contributions from different cells may be to the same matrix entries if the cells share degrees of freedom and, consequently, can not happen at the same time unless we want to risk a race condition (see http://en.wikipedia.org/wiki/Race_condition). Thus, we call these two cells in conflict, and we can only allow operations in parallel from cells that do not conflict. In other words, two cells are in conflict if the set of matrix entries (for example characterized by the rows) have a nonempty intersection.

      -

      In this generality, computing the graph of conflicts would require calling a function that determines whether two iterators (or the two objects they represent) conflict, and calling it for every pair of iterators, i.e., $\frac 12 N (N-1)$ times. This is too expensive in general. A better approach is to require a user-defined function that returns for every iterator it is called for a set of indicators of some kind that characterize a conflict; two iterators are in conflict if their conflict indicator sets have a nonempty intersection. In the example of assembling a matrix, the conflict indicator set would contain the indices of all degrees of freedom on the cell pointed to (in the case of continuous Galerkin methods) or the union of indices of degree of freedom on the current cell and all cells adjacent to the faces of the current cell (in the case of discontinuous Galerkin methods, because there one computes face integrals coupling the degrees of freedom connected by a common face – see step-12).

      +

      In this generality, computing the graph of conflicts would require calling a function that determines whether two iterators (or the two objects they represent) conflict, and calling it for every pair of iterators, i.e., $\frac 12 N (N-1)$ times. This is too expensive in general. A better approach is to require a user-defined function that returns for every iterator it is called for a set of indicators of some kind that characterize a conflict; two iterators are in conflict if their conflict indicator sets have a nonempty intersection. In the example of assembling a matrix, the conflict indicator set would contain the indices of all degrees of freedom on the cell pointed to (in the case of continuous Galerkin methods) or the union of indices of degree of freedom on the current cell and all cells adjacent to the faces of the current cell (in the case of discontinuous Galerkin methods, because there one computes face integrals coupling the degrees of freedom connected by a common face – see step-12).

      Note
      The conflict set returned by the user defined function passed as third argument needs to accurately describe all degrees of freedom for which anything is written into the matrix or right hand side. In other words, if the writing happens through a function like AffineConstraints::copy_local_to_global(), then the set of conflict indices must actually contain not only the degrees of freedom on the current cell, but also those they are linked to by constraints such as hanging nodes.
      -

      In other situations, the conflict indicator sets may represent something different altogether – it is up to the caller of this function to describe what it means for two iterators to conflict. Given this, computing conflict graph edges can be done significantly more cheaply than with ${\cal O}(N^2)$ operations.

      +

      In other situations, the conflict indicator sets may represent something different altogether – it is up to the caller of this function to describe what it means for two iterators to conflict. Given this, computing conflict graph edges can be done significantly more cheaply than with ${\cal O}(N^2)$ operations.

      In any case, the result of the function will be so that iterators whose conflict indicator sets have overlap will not be assigned to the same color.

      Note
      The algorithm used in this function is described in a paper by Turcksin, Kronbichler and Bangerth, see workstream_paper.
      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html 2024-03-17 21:57:42.075225850 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html 2024-03-17 21:57:42.071225825 +0000 @@ -295,7 +295,7 @@ const bool colorize = false&#href_anchor"memdoc"> -

      Initialize the given triangulation with a hypercube (line in 1d, square in 2d, etc) consisting of exactly one cell. The hypercube volume is the tensor product interval $[left,right]^{\text{dim}}$ in the present number of dimensions, where the limits are given as arguments. They default to zero and unity, then producing the unit hypercube.

      +

      Initialize the given triangulation with a hypercube (line in 1d, square in 2d, etc) consisting of exactly one cell. The hypercube volume is the tensor product interval $[left,right]^{\text{dim}}$ in the present number of dimensions, where the limits are given as arguments. They default to zero and unity, then producing the unit hypercube.

      If the argument colorize is false, then all boundary indicators are set to zero (the default boundary indicator) for 2d and 3d. If it is true, the boundary is colorized as in hyper_rectangle(). In 1d the indicators are always colorized, see hyper_rectangle().

      @@ -728,7 +728,7 @@ const bool colorize = false&#href_anchor"memdoc"> -

      Generate a grid consisting of a channel with a cylinder. This is a common benchmark for Navier-Stokes solvers. The geometry consists of a channel of size $[0, 2.2] \times [0, 0.41] \times [0, 0.41] $ (where the $z$ dimension is omitted in 2d) with a cylinder, parallel to the $z$ axis with diameter $0.1$, centered at $(0.2, 0.2, 0)$. The channel has three distinct regions:

        +

        Generate a grid consisting of a channel with a cylinder. This is a common benchmark for Navier-Stokes solvers. The geometry consists of a channel of size $[0, 2.2] \times [0, 0.41] \times [0, 0.41] $ (where the $z$ dimension is omitted in 2d) with a cylinder, parallel to the $z$ axis with diameter $0.1$, centered at $(0.2, 0.2, 0)$. The channel has three distinct regions:

        1. If n_shells is greater than zero, then there are that many shells centered around the cylinder,
        2. @@ -752,10 +752,10 @@
          Parameters
          - + - +
          triaTriangulation to be created. Must be empty upon calling this function.
          shell_region_widthWidth of the layer of shells around the cylinder. This value should be between $0$ and $0.05$; the default value is $0.03$.
          shell_region_widthWidth of the layer of shells around the cylinder. This value should be between $0$ and $0.05$; the default value is $0.03$.
          n_shellsNumber of shells to use in the shell layer.
          skewnessParameter controlling how close the shells are to the cylinder: see the mathematical definition given in GridGenerator::concentric_hyper_shells.
          colorizeIf true, then assign different boundary ids to different parts of the boundary. For more information on boundary indicators see this glossary entry. The left boundary (at $x = 0$) is assigned an id of $0$, the right boundary (at $x = 2.2$) is assigned an id of $1$; the boundary of the obstacle in the middle (i.e., the circle in 2d or the cylinder walls in 3d) is assigned an id of $2$, and the channel walls are assigned an id of $3$.
          colorizeIf true, then assign different boundary ids to different parts of the boundary. For more information on boundary indicators see this glossary entry. The left boundary (at $x = 0$) is assigned an id of $0$, the right boundary (at $x = 2.2$) is assigned an id of $1$; the boundary of the obstacle in the middle (i.e., the circle in 2d or the cylinder walls in 3d) is assigned an id of $2$, and the channel walls are assigned an id of $3$.
          @@ -1307,7 +1307,7 @@ const double half_length = 1.&#href_anchor"memdoc"> -

          Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius.

          +

          Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius.

          The boundaries are colored according to the following scheme: 0 for the hull of the cylinder, 1 for the left hand face and 2 for the right hand face (see the glossary entry on colorization).

          The manifold id for the hull of the cylinder is set to zero, and a CylindricalManifold is attached to it.

          Precondition
          The triangulation passed as argument needs to be empty when calling this function.
          @@ -1341,7 +1341,7 @@ const double half_length = 1.&#href_anchor"memdoc"> -

          Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius. This function is only implemented for dim==3.

          +

          Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius. This function is only implemented for dim==3.

          The boundaries are colored according to the following scheme: 0 for the hull of the cylinder, 1 for the left hand face and 2 for the right hand face (see the glossary entry on colorization).

          The manifold id for the hull of the cylinder is set to zero, and a CylindricalManifold is attached to it.

          @@ -1539,7 +1539,7 @@
          Parameters
          - +
          triaA Triangulation object which has to be empty.
          sizesA vector of integers of dimension GeometryInfo<dim>::faces_per_cell with the following meaning: the legs of the cross are stacked on the faces of the center cell, in the usual order of deal.II cells, namely first $-x$, then $x$, then $-y$ and so on. The corresponding entries in sizes name the number of cells stacked on this face. All numbers may be zero, thus L- and T-shaped domains are specializations of this domain.
          sizesA vector of integers of dimension GeometryInfo<dim>::faces_per_cell with the following meaning: the legs of the cross are stacked on the faces of the center cell, in the usual order of deal.II cells, namely first $-x$, then $x$, then $-y$ and so on. The corresponding entries in sizes name the number of cells stacked on this face. All numbers may be zero, thus L- and T-shaped domains are specializations of this domain.
          colorize_cellsIf colorization is enabled, then the material id of a cells corresponds to the leg it is in. The id of the center cell is zero, and then the legs are numbered starting at one (see the glossary entry on colorization).
          @@ -1726,7 +1726,7 @@
        3. 96 for the rhombic dodecahedron refined once. This choice dates from an older version of deal.II before the Manifold classes were implemented: today this choce is equivalent to the rhombic dodecahedron after performing one global refinement.
        4. -Numbers of the kind $192\times 2^m$ with $m\geq 0$ integer. This choice is similar to the 24 and 48 cell cases, but provides additional refinements in azimuthal direction combined with a single layer in radial direction. The base mesh is either the 6 or 12 cell version, depending on whether $m$ in the power is odd or even, respectively.
        5. +Numbers of the kind $192\times 2^m$ with $m\geq 0$ integer. This choice is similar to the 24 and 48 cell cases, but provides additional refinements in azimuthal direction combined with a single layer in radial direction. The base mesh is either the 6 or 12 cell version, depending on whether $m$ in the power is odd or even, respectively.
        6. The versions with 24, 48, and $2^m 192$ cells are useful if the shell is thin and the radial lengths should be made more similar to the circumferential lengths.

          The 3d grids with 12 and 96 cells are plotted below:

          @@ -1921,7 +1921,7 @@ const unsigned int n_axial_cells = 0&#href_anchor"memdoc"> -

          Produce a domain that is the space between two cylinders in 3d, with given length, inner and outer radius and a given number of elements. The cylinder shell is built around the $z$-axis with the two faces located at $z = 0$ and $z = $ length.

          +

          Produce a domain that is the space between two cylinders in 3d, with given length, inner and outer radius and a given number of elements. The cylinder shell is built around the $z$-axis with the two faces located at $z = 0$ and $z = $ length.

          If n_radial_cells is zero (as is the default), then it is computed adaptively such that the resulting elements have the least aspect ratio. The same holds for n_axial_cells.

          Note
          Although this function is declared as a template, it does not make sense in 1d and 2d. Also keep in mind that this object is rotated and positioned differently than the one created by cylinder().

          All manifold ids are set to zero, and a CylindricalManifold is attached to the triangulation.

          @@ -1968,7 +1968,7 @@
      -

      Produce the volume or surface mesh of a torus. The axis of the torus is the $y$-axis while the plane of the torus is the $x$- $z$ plane.

      +

      Produce the volume or surface mesh of a torus. The axis of the torus is the $y$-axis while the plane of the torus is the $x$- $z$ plane.

      If dim is 3, the mesh will be the volume of the torus, using a mesh equivalent to the circle in the poloidal coordinates with 5 cells on the cross section. This function attaches a TorusManifold to all boundary faces which are marked with a manifold id of 1, a CylindricalManifold to the interior cells and all their faces which are marked with a manifold id of 2 (representing a flat state within the poloidal coordinates), and a TransfiniteInterpolationManifold to the cells between the TorusManifold on the surface and the ToroidalManifold in the center, with cells marked with manifold id 0.

      An example for the case if dim is 3 with a cut through the domain at $z=0$, 6 toroidal cells, $R=2$ and $r=0.5$ without any global refinement is given here:

      @@ -2027,7 +2027,7 @@ const bool colorize = false&#href_anchor"memdoc"> -

      This function produces a square in the xy-plane with a cylindrical hole in the middle. The square and the circle are centered at the origin. In 3d, this geometry is extruded in $z$ direction to the interval $[0,L]$.

      +

      This function produces a square in the xy-plane with a cylindrical hole in the middle. The square and the circle are centered at the origin. In 3d, this geometry is extruded in $z$ direction to the interval $[0,L]$.

      The inner boundary has a manifold id of $0$ and a boundary id of $6$. This function attaches a PolarManifold or CylindricalManifold to the interior boundary in 2d and 3d respectively. The other faces have boundary ids of $0, 1, 2, 3, 4$, or $5$ given in the standard order of faces in 2d or 3d.

      @@ -2103,7 +2103,7 @@ {\tanh(\mathrm{skewness})}\right) \]" src="form_1359.png"/>

      -

      where skewness is a parameter controlling the shell spacing in the radial direction: values of skewness close to zero correspond to even spacing, while larger values of skewness (such as $2$ or $3$) correspond to shells biased to the inner radius.

      +

      where skewness is a parameter controlling the shell spacing in the radial direction: values of skewness close to zero correspond to even spacing, while larger values of skewness (such as $2$ or $3$) correspond to shells biased to the inner radius.

      n_cells_per_shell is the same as in GridGenerator::hyper_shell: in 2d the default choice of zero will result in 8 cells per shell (and 12 in 3d). The only valid values in 3d are 6 (the default), 12, and 96 cells: see the documentation of GridGenerator::hyper_shell for more information.

      If colorize is true then the outer boundary of the merged shells has a boundary id of $1$ and the inner boundary has a boundary id of $0$.

      Example: The following code (see, e.g., step-10 for instructions on how to visualize GNUPLOT output)

      @@ -2611,10 +2611,10 @@
      -

      Extrude the Triangulation input in the $z$ direction from $z = 0$ to $z =
-\text{height}$ and store it in result. This is done by replicating the input triangulation n_slices times in $z$ direction, and then forming (n_slices-1) layers of cells out of these replicates.

      -

      The boundary indicators of the faces of input will be assigned to the corresponding side walls in $z$ direction. The bottom and top get the next two free boundary indicators: i.e., if input has boundary ids of $0$, $1$, and $42$, then the $z = 0$ boundary id of result will be $43$ and the $z = \text{height}$ boundary id will be $44$.

      -

      This function does not, by default, copy manifold ids. The reason for this is that there is no way to set the manifold ids on the lines of the resulting Triangulation without more information: for example, if two faces of input with different manifold ids meet at a shared vertex then there is no a priori reason to pick one manifold id or another for the lines created in result that are parallel to the $z$-axis and pass through that point. If copy_manifold_ids is true then this function sets line manifold ids by picking the one that appears first in manifold_priorities. For example: if manifold_priorities is {0, 42, numbers::flat_manifold_id} and the line under consideration is adjacent to faces with manifold ids of 0 and 42, then that line will have a manifold id of 0. The correct ordering is almost always

        +

        Extrude the Triangulation input in the $z$ direction from $z = 0$ to $z =
+\text{height}$ and store it in result. This is done by replicating the input triangulation n_slices times in $z$ direction, and then forming (n_slices-1) layers of cells out of these replicates.

        +

        The boundary indicators of the faces of input will be assigned to the corresponding side walls in $z$ direction. The bottom and top get the next two free boundary indicators: i.e., if input has boundary ids of $0$, $1$, and $42$, then the $z = 0$ boundary id of result will be $43$ and the $z = \text{height}$ boundary id will be $44$.

        +

        This function does not, by default, copy manifold ids. The reason for this is that there is no way to set the manifold ids on the lines of the resulting Triangulation without more information: for example, if two faces of input with different manifold ids meet at a shared vertex then there is no a priori reason to pick one manifold id or another for the lines created in result that are parallel to the $z$-axis and pass through that point. If copy_manifold_ids is true then this function sets line manifold ids by picking the one that appears first in manifold_priorities. For example: if manifold_priorities is {0, 42, numbers::flat_manifold_id} and the line under consideration is adjacent to faces with manifold ids of 0 and 42, then that line will have a manifold id of 0. The correct ordering is almost always

        1. manifold ids set on the boundary,
        2. @@ -2632,8 +2632,8 @@
          Parameters
          - - + + @@ -2780,7 +2780,7 @@
          [in]inputA two-dimensional input triangulation.
          [in]n_slicesThe number of times the input triangulation will be replicated in $z$ direction. These slices will then be connected into (n_slices-1) layers of three-dimensional cells. Clearly, n_slices must be at least two.
          [in]heightThe distance in $z$ direction between the individual slices.
          [in]n_slicesThe number of times the input triangulation will be replicated in $z$ direction. These slices will then be connected into (n_slices-1) layers of three-dimensional cells. Clearly, n_slices must be at least two.
          [in]heightThe distance in $z$ direction between the individual slices.
          [out]resultThe resulting three-dimensional triangulation.
          [in]copy_manifold_idsSee the description above.
          [in]manifold_prioritiesSee the description above.
          Triangulation< dim, spacedim2 > & out_tria&#href_anchor"memdoc"> -

          Given an input triangulation in_tria, this function makes a new flat triangulation out_tria which contains a single level with all active cells of the input triangulation. If spacedim1 and spacedim2 are different, only the first few components of the vertex coordinates are copied over. This is useful to create a Triangulation<2,3> out of a Triangulation<2,2>, or to project a Triangulation<2,3> into a Triangulation<2,2>, by neglecting the $z$ components of the vertices.

          +

          Given an input triangulation in_tria, this function makes a new flat triangulation out_tria which contains a single level with all active cells of the input triangulation. If spacedim1 and spacedim2 are different, only the first few components of the vertex coordinates are copied over. This is useful to create a Triangulation<2,3> out of a Triangulation<2,2>, or to project a Triangulation<2,3> into a Triangulation<2,2>, by neglecting the $z$ components of the vertices.

          No internal checks are performed on the vertices, which are assumed to make sense topologically in the target spacedim2 dimensional space. If this is not the case, you will encounter problems when using the triangulation later on.

          All information about cell manifold indicators and material indicators are copied from one triangulation to the other. The same is true for the manifold indicators and, if an object is at the boundary, the boundary indicators of faces and edges of the triangulation.

          This function will fail if the input Triangulation is of type parallel::distributed::Triangulation, as well as when the input Triangulation contains hanging nodes. In other words, this function only works for globally refined triangulations.

          @@ -2908,7 +2908,7 @@
          const bool colorize = false&#href_anchor"memdoc"> -

          Initialize the given triangulation with a hypercube (square in 2d and cube in 3d) consisting of repetitions cells in each direction. The hypercube volume is the tensor product interval $[left,right]^{\text{dim}}$ in the present number of dimensions, where the limits are given as arguments. They default to zero and unity, then producing the unit hypercube.

          +

          Initialize the given triangulation with a hypercube (square in 2d and cube in 3d) consisting of repetitions cells in each direction. The hypercube volume is the tensor product interval $[left,right]^{\text{dim}}$ in the present number of dimensions, where the limits are given as arguments. They default to zero and unity, then producing the unit hypercube.

          Note
          This function connects internally 4/8 vertices to quadrilateral/hexahedral cells and subdivides these into 2/5 triangular/tetrahedral cells.

          Also see Simplex support.

          /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 2024-03-17 21:57:42.107226048 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 2024-03-17 21:57:42.111226072 +0000 @@ -216,7 +216,7 @@

          -

          As an example, with no coarsening, setting top_fraction_of_cells to 1/3 will result in approximately doubling the number of cells in two dimensions. That is because each of these 1/3 of cells will be replaced by its four children, resulting in $4\times \frac 13 N$ cells, whereas the remaining 2/3 of cells remains untouched – thus yielding a total of $4\times \frac 13 N + \frac 23 N = 2N$ cells. The same effect in three dimensions is achieved by refining 1/7th of the cells. These values are therefore frequently used because they ensure that the cost of computations on subsequent meshes become expensive sufficiently quickly that the fraction of time spent on the coarse meshes is not too large. On the other hand, the fractions are small enough that mesh adaptation does not refine too many cells in each step.

          +

          As an example, with no coarsening, setting top_fraction_of_cells to 1/3 will result in approximately doubling the number of cells in two dimensions. That is because each of these 1/3 of cells will be replaced by its four children, resulting in $4\times \frac 13 N$ cells, whereas the remaining 2/3 of cells remains untouched – thus yielding a total of $4\times \frac 13 N + \frac 23 N = 2N$ cells. The same effect in three dimensions is achieved by refining 1/7th of the cells. These values are therefore frequently used because they ensure that the cost of computations on subsequent meshes become expensive sufficiently quickly that the fraction of time spent on the coarse meshes is not too large. On the other hand, the fractions are small enough that mesh adaptation does not refine too many cells in each step.

          Note
          This function only sets the coarsening and refinement flags. The mesh is not changed until you call Triangulation::execute_coarsening_and_refinement().
          Parameters
          @@ -276,14 +276,14 @@

          This function provides a strategy to mark cells for refinement and coarsening with the goal of controlling the reduction of the error estimate.

          Also known as the bulk criterion or Dörfler marking, this function computes the thresholds for refinement and coarsening such that the criteria of cells getting flagged for refinement make up for a certain fraction of the total error. We explain its operation for refinement, coarsening works analogously.

          Let cK be the criterion of cell K. Then the total error estimate is computed by the formula

          -\[
+<picture><source srcset=\[
 E = \sum_{K\in \cal T} c_K.
-\] +\]" src="form_1368.png"/>

          -

          If 0 < a < 1 is top_fraction, then we refine the smallest subset $\cal M$ of the Triangulation $\cal T$ such that

          -\[
+<p>If <em> 0 < a < 1</em> is <code>top_fraction</code>, then we refine the smallest subset <picture><source srcset=$\cal M$ of the Triangulation $\cal T$ such that

          +\[
 a E \le \sum_{K\in \cal M} c_K
-\] +\]" src="form_1371.png"/>

          The algorithm is performed by the greedy algorithm described in refine_and_coarsen_fixed_number().

          Note
          The often used formula with squares on the left and right is recovered by actually storing the square of cK in the vector criteria.
          @@ -326,32 +326,32 @@
          const unsigned int order = 2&#href_anchor"memdoc"> -

          This function flags cells of a triangulation for refinement with the aim to reach a grid that is optimal with respect to an objective function that tries to balance reducing the error and increasing the numerical cost when the mesh is refined. Specifically, this function makes the assumption that if you refine a cell $K$ with error indicator $\eta_K$ provided by the second argument to this function, then the error on the children (for all children together) will only be $2^{-\text{order}}\eta_K$ where order is the third argument of this function. This makes the assumption that the error is only a local property on a mesh and can be reduced by local refinement – an assumption that is true for the interpolation operator, but not for the usual Galerkin projection, although it is approximately true for elliptic problems where the Greens function decays quickly and the error here is not too much affected by a too coarse mesh somewhere else.

          -

          With this, we can define the objective function this function tries to optimize. Let us assume that the mesh currently has $N_0$ cells. Then, if we refine the $m$ cells with the largest errors, we expect to get (in $d$ space dimensions)

          -\[
+<p>This function flags cells of a triangulation for refinement with the aim to reach a grid that is optimal with respect to an objective function that tries to balance reducing the error and increasing the numerical cost when the mesh is refined. Specifically, this function makes the assumption that if you refine a cell <picture><source srcset=$K$ with error indicator $\eta_K$ provided by the second argument to this function, then the error on the children (for all children together) will only be $2^{-\text{order}}\eta_K$ where order is the third argument of this function. This makes the assumption that the error is only a local property on a mesh and can be reduced by local refinement – an assumption that is true for the interpolation operator, but not for the usual Galerkin projection, although it is approximately true for elliptic problems where the Greens function decays quickly and the error here is not too much affected by a too coarse mesh somewhere else.

          +

          With this, we can define the objective function this function tries to optimize. Let us assume that the mesh currently has $N_0$ cells. Then, if we refine the $m$ cells with the largest errors, we expect to get (in $d$ space dimensions)

          +\[
   N(m) = (N_0-m) + 2^d m = N_0 + (2^d-1)m
-\] +\]" src="form_1375.png"/>

          -

          cells ( $N_0-m$ are not refined, and each of the $m$ cells we refine yield $2^d$ child cells. On the other hand, with refining $m$ cells, and using the assumptions above, we expect that the error will be

          -\[
+<p> cells ( <picture><source srcset=$N_0-m$ are not refined, and each of the $m$ cells we refine yield $2^d$ child cells. On the other hand, with refining $m$ cells, and using the assumptions above, we expect that the error will be

          +\[
   \eta^\text{exp}(m)
   =
   \sum_{K, K\; \text{will not be refined}} \eta_K
   +
   \sum_{K, K\; \text{will be refined}} 2^{-\text{order}}\eta_K
-\] +\]" src="form_1378.png"/>

          -

          where the first sum extends over $N_0-m$ cells and the second over the $m$ cells that will be refined. Note that $N(m)$ is an increasing function of $m$ whereas $\eta^\text{exp}(m)$ is a decreasing function.

          -

          This function then tries to find that number $m$ of cells to mark for refinement for which the objective function

          -\[
+<p> where the first sum extends over <picture><source srcset=$N_0-m$ cells and the second over the $m$ cells that will be refined. Note that $N(m)$ is an increasing function of $m$ whereas $\eta^\text{exp}(m)$ is a decreasing function.

          +

          This function then tries to find that number $m$ of cells to mark for refinement for which the objective function

          +\[
   J(m) = N(m)^{\text{order}/d} \eta^\text{exp}(m)
-\] +\]" src="form_1381.png"/>

          is minimal.

          The rationale for this function is two-fold. First, compared to the refine_and_coarsen_fixed_fraction() and refine_and_coarsen_fixed_number() functions, this function has the property that if all refinement indicators are the same (i.e., we have achieved a mesh where the error per cell is equilibrated), then the entire mesh is refined. This is based on the observation that a mesh with equilibrated error indicators is the optimal mesh (i.e., has the least overall error) among all meshes with the same number of cells. (For proofs of this, see R. Becker, M. Braack, R. Rannacher: "Numerical simulation of laminar flames at low Mach number with adaptive finite elements", Combustion Theory and Modelling, Vol. 3, Nr. 3, p. 503-534 1999; and W. Bangerth, R. Rannacher: "Adaptive Finite Element Methods for Differential Equations", Birkhauser, 2003.)

          -

          Second, the function uses the observation that ideally, the error behaves like $e \approx c N^{-\alpha}$ with some constant $\alpha$ that depends on the dimension and the finite element degree. It should - given optimal mesh refinement - not depend so much on the regularity of the solution, as it is based on the idea, that all singularities can be resolved by refinement. Mesh refinement is then based on the idea that we want to make $c=e N^\alpha$ small. This corresponds to the functional $J(m)$ above.

          +

          Second, the function uses the observation that ideally, the error behaves like $e \approx c N^{-\alpha}$ with some constant $\alpha$ that depends on the dimension and the finite element degree. It should - given optimal mesh refinement - not depend so much on the regularity of the solution, as it is based on the idea, that all singularities can be resolved by refinement. Mesh refinement is then based on the idea that we want to make $c=e N^\alpha$ small. This corresponds to the functional $J(m)$ above.

          Note
          This function was originally implemented by Thomas Richter. It follows a strategy described in [Richter2005]. See in particular Section 4.3, pp. 42-43.

          Definition at line 448 of file grid_refinement.cc.

          /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 2024-03-17 21:57:42.231226813 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 2024-03-17 21:57:42.235226838 +0000 @@ -510,8 +510,8 @@
          -

          Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1
-\; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature. This version of the function uses a linear mapping to compute the JxW values on each cell.

          +

          Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1
+\; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature. This version of the function uses a linear mapping to compute the JxW values on each cell.

          If the triangulation is a dim-dimensional one embedded in a higher dimensional space of dimension spacedim, then the value returned is the dim-dimensional measure. For example, for a two-dimensional triangulation in three-dimensional space, the value returned is the area of the surface so described. (This obviously makes sense since the spacedim-dimensional measure of a dim-dimensional triangulation would always be zero if dim < spacedim).

          This function also works for objects of type parallel::distributed::Triangulation, in which case the function is a collective operation.

          Parameters
          @@ -543,8 +543,8 @@
          const Mapping< dim, spacedim > & mapping&#href_anchor"memdoc"> -

          Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1
-\; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature for which we use the mapping argument.

          +

          Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1
+\; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature for which we use the mapping argument.

          If the triangulation is a dim-dimensional one embedded in a higher dimensional space of dimension spacedim, then the value returned is the dim-dimensional measure. For example, for a two-dimensional triangulation in three-dimensional space, the value returned is the area of the surface so described. (This obviously makes sense since the spacedim-dimensional measure of a dim-dimensional triangulation would always be zero if dim < spacedim.

          This function also works for objects of type parallel::distributed::Triangulation, in which case the function is a collective operation.

          Parameters
          @@ -675,8 +675,8 @@
      -

      This function computes an affine approximation of the map from the unit coordinates to the real coordinates of the form $p_\text{real} = A
-p_\text{unit} + b $ by a least squares fit of this affine function to the $2^\text{dim}$ vertices representing a quadrilateral or hexahedral cell in spacedim dimensions. The result is returned as a pair with the matrix A as the first argument and the vector b describing distance of the plane to the origin.

      +

      This function computes an affine approximation of the map from the unit coordinates to the real coordinates of the form $p_\text{real} = A
+p_\text{unit} + b $ by a least squares fit of this affine function to the $2^\text{dim}$ vertices representing a quadrilateral or hexahedral cell in spacedim dimensions. The result is returned as a pair with the matrix A as the first argument and the vector b describing distance of the plane to the origin.

      For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping, even in cases where the actual transformation by a bi-/trilinear or higher order mapping might be singular. The result is exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

      This approximation is underlying the function TriaAccessor::real_to_unit_cell_affine_approximation() function.

      For exact transformations to the unit cell, use Mapping::transform_real_to_unit_cell().

      @@ -707,7 +707,7 @@ const Quadrature< dim > & quadrature&#href_anchor"memdoc"> -

      Computes an aspect ratio measure for all locally-owned active cells and fills a vector with one entry per cell, given a triangulation and mapping. The size of the vector that is returned equals the number of active cells. The vector contains zero for non locally-owned cells. The aspect ratio of a cell is defined as the ratio of the maximum to minimum singular value of the Jacobian, taking the maximum over all quadrature points of a quadrature rule specified via quadrature. For example, for the special case of rectangular elements in 2d with dimensions $a$ and $b$ ( $a \geq b$), this function returns the usual aspect ratio definition $a/b$. The above definition using singular values is a generalization to arbitrarily deformed elements. This function is intended to be used for $d=2,3$ space dimensions, but it can also be used for $d=1$ returning a value of 1.

      +

      Computes an aspect ratio measure for all locally-owned active cells and fills a vector with one entry per cell, given a triangulation and mapping. The size of the vector that is returned equals the number of active cells. The vector contains zero for non locally-owned cells. The aspect ratio of a cell is defined as the ratio of the maximum to minimum singular value of the Jacobian, taking the maximum over all quadrature points of a quadrature rule specified via quadrature. For example, for the special case of rectangular elements in 2d with dimensions $a$ and $b$ ( $a \geq b$), this function returns the usual aspect ratio definition $a/b$. The above definition using singular values is a generalization to arbitrarily deformed elements. This function is intended to be used for $d=2,3$ space dimensions, but it can also be used for $d=1$ returning a value of 1.

      Note
      Inverted elements do not throw an exception. Instead, a value of inf is written into the vector in case of inverted elements.
      Make sure to use enough quadrature points for a precise calculation of the aspect ratio in case of deformed elements.
      @@ -881,7 +881,7 @@ const double tol = 1e-12&#href_anchor"memdoc">

      Remove vertices that are duplicated, due to the input of a structured grid, for example. If these vertices are not removed, the faces bounded by these vertices become part of the boundary, even if they are in the interior of the mesh.

      -

      This function is called by some GridIn::read_* functions. Only the vertices with indices in considered_vertices are tested for equality. This speeds up the algorithm, which is, for worst-case hyper cube geometries $O(N^{3/2})$ in 2d and $O(N^{5/3})$ in 3d: quite slow. However, if you wish to consider all vertices, simply pass an empty vector. In that case, the function fills considered_vertices with all vertices.

      +

      This function is called by some GridIn::read_* functions. Only the vertices with indices in considered_vertices are tested for equality. This speeds up the algorithm, which is, for worst-case hyper cube geometries $O(N^{3/2})$ in 2d and $O(N^{5/3})$ in 3d: quite slow. However, if you wish to consider all vertices, simply pass an empty vector. In that case, the function fills considered_vertices with all vertices.

      Two vertices are considered equal if their difference in each coordinate direction is less than tol. This implies that nothing happens if the tolerance is set to zero.

      Definition at line 761 of file grid_tools.cc.

      @@ -1016,7 +1016,7 @@ Triangulation< dim, spacedim > & triangulation&#href_anchor"memdoc">

      Transform the vertices of the given triangulation by applying the function object provided as first argument to all its vertices.

      -

      The transformation given as argument is used to transform each vertex. Its respective type has to offer a function-like syntax, i.e. the predicate is either an object of a type that has an operator(), or it is a pointer to a non-member function, or it is a lambda function object. In either case, argument and return value have to be of type Point<spacedim>. An example – a simple transformation that moves the object two units to the right in the $x_1$ direction – could look like as follows:

      +

      The transformation given as argument is used to transform each vertex. Its respective type has to offer a function-like syntax, i.e. the predicate is either an object of a type that has an operator(), or it is a pointer to a non-member function, or it is a lambda function object. In either case, argument and return value have to be of type Point<spacedim>. An example – a simple transformation that moves the object two units to the right in the $x_1$ direction – could look like as follows:

      ... // fill triangulation with something
      {
      @@ -1197,13 +1197,13 @@ const bool solve_for_absolute_positions = false&#href_anchor"memdoc">

      Transform the given triangulation smoothly to a different domain where, typically, each of the vertices at the boundary of the triangulation is mapped to the corresponding points in the new_points map.

      -

      The unknown displacement field $u_d(\mathbf x)$ in direction $d$ is obtained from the minimization problem

      -\[ \min\, \int \frac{1}{2}
+<p>The unknown displacement field <picture><source srcset=$u_d(\mathbf x)$ in direction $d$ is obtained from the minimization problem

      +\[ \min\, \int \frac{1}{2}
   c(\mathbf x)
   \mathbf \nabla u_d(\mathbf x) \cdot
   \mathbf \nabla u_d(\mathbf x)
   \,\rm d x
-\] +\]" src="form_1395.png"/>

      subject to prescribed constraints. The minimizer is obtained by solving the Laplace equation of the dim components of a displacement field that maps the current domain into one described by new_points . Linear finite elements with four Gaussian quadrature points in each direction are used. The difference between the vertex positions specified in new_points and their current value in tria therefore represents the prescribed values of this displacement field at the boundary of the domain, or more precisely at all of those locations for which new_points provides values (which may be at part of the boundary, or even in the interior of the domain). The function then evaluates this displacement field at each unconstrained vertex and uses it to place the mapped vertex where the displacement field locates it. Because the solution of the Laplace equation is smooth, this guarantees a smooth mapping from the old domain to the new one.

      Parameters
      @@ -2984,7 +2984,7 @@

      This function does the same as the previous one, i.e. it partitions a triangulation using a partitioning algorithm into a number of subdomains identified by the cell->subdomain_id() flag.

      The difference to the previous function is the second argument, a sparsity pattern that represents the connectivity pattern between cells.

      -

      While the function above builds it directly from the triangulation by considering which cells neighbor each other, this function can take a more refined connectivity graph. The sparsity pattern needs to be of size $N\times N$, where $N$ is the number of active cells in the triangulation. If the sparsity pattern contains an entry at position $(i,j)$, then this means that cells $i$ and $j$ (in the order in which they are traversed by active cell iterators) are to be considered connected; partitioning algorithm will then try to partition the domain in such a way that (i) the subdomains are of roughly equal size, and (ii) a minimal number of connections are broken.

      +

      While the function above builds it directly from the triangulation by considering which cells neighbor each other, this function can take a more refined connectivity graph. The sparsity pattern needs to be of size $N\times N$, where $N$ is the number of active cells in the triangulation. If the sparsity pattern contains an entry at position $(i,j)$, then this means that cells $i$ and $j$ (in the order in which they are traversed by active cell iterators) are to be considered connected; partitioning algorithm will then try to partition the domain in such a way that (i) the subdomains are of roughly equal size, and (ii) a minimal number of connections are broken.

      This function is mainly useful in cases where connections between cells exist that are not present in the triangulation alone (otherwise the previous function would be the simpler one to use). Such connections may include that certain parts of the boundary of a domain are coupled through symmetric boundary conditions or integrals (e.g. friction contact between the two sides of a crack in the domain), or if a numerical scheme is used that not only connects immediate neighbors but a larger neighborhood of cells (e.g. when solving integral equations).

      In addition, this function may be useful in cases where the default sparsity pattern is not entirely sufficient. This can happen because the default is to just consider face neighbors, not neighboring cells that are connected by edges or vertices. While the latter couple when using continuous finite elements, they are typically still closely connected in the neighborship graph, and partitioning algorithm will not usually cut important connections in this case. However, if there are vertices in the mesh where many cells (many more than the common 4 or 6 in 2d and 3d, respectively) come together, then there will be a significant number of cells that are connected across a vertex, but several degrees removed in the connectivity graph built only using face neighbors. In a case like this, partitioning algorithm may sometimes make bad decisions and you may want to build your own connectivity graph.

      Note
      If the weight signal has been attached to the triangulation, then this will be used and passed to the partitioner.
      @@ -3511,7 +3511,7 @@

      An orthogonal equality test for faces.

      face1 and face2 are considered equal, if a one to one matching between its vertices can be achieved via an orthogonal equality relation.

      -

      Here, two vertices v_1 and v_2 are considered equal, if $M\cdot v_1 + offset - v_2$ is parallel to the unit vector in unit direction direction. If the parameter matrix is a reference to a spacedim x spacedim matrix, $M$ is set to matrix, otherwise $M$ is the identity matrix.

      +

      Here, two vertices v_1 and v_2 are considered equal, if $M\cdot v_1 + offset - v_2$ is parallel to the unit vector in unit direction direction. If the parameter matrix is a reference to a spacedim x spacedim matrix, $M$ is set to matrix, otherwise $M$ is the identity matrix.

      If the matching was successful, the relative orientation of face1 with respect to face2 is returned in the bitset orientation, where

      orientation[0] -> face_orientation
      orientation[1] -> face_flip
      orientation[2] -> face_rotation
      @@ -3626,8 +3626,8 @@

      This function tries to match all faces belonging to the first boundary with faces belonging to the second boundary with the help of orthogonal_equality().

      The bitset that is returned inside of PeriodicFacePair encodes the relative orientation of the first face with respect to the second face, see the documentation of orthogonal_equality() for further details.

      The direction refers to the space direction in which periodicity is enforced. When matching periodic faces this vector component is ignored.

      -

      The offset is a vector tangential to the faces that is added to the location of vertices of the 'first' boundary when attempting to match them to the corresponding vertices of the 'second' boundary. This can be used to implement conditions such as $u(0,y)=u(1,y+1)$.

      -

      Optionally, a $dim\times dim$ rotation matrix can be specified that describes how vector valued DoFs of the first face should be modified prior to constraining to the DoFs of the second face. The matrix is used in two places. First, matrix will be supplied to orthogonal_equality() and used for matching faces: Two vertices $v_1$ and $v_2$ match if $\text{matrix}\cdot v_1 + \text{offset} - v_2$ is parallel to the unit vector in unit direction direction. (For more details see DoFTools::make_periodicity_constraints(), the glossary glossary entry on periodic conditions and step-45). Second, matrix will be stored in the PeriodicFacePair collection matched_pairs for further use.

      +

      The offset is a vector tangential to the faces that is added to the location of vertices of the 'first' boundary when attempting to match them to the corresponding vertices of the 'second' boundary. This can be used to implement conditions such as $u(0,y)=u(1,y+1)$.

      +

      Optionally, a $dim\times dim$ rotation matrix can be specified that describes how vector valued DoFs of the first face should be modified prior to constraining to the DoFs of the second face. The matrix is used in two places. First, matrix will be supplied to orthogonal_equality() and used for matching faces: Two vertices $v_1$ and $v_2$ match if $\text{matrix}\cdot v_1 + \text{offset} - v_2$ is parallel to the unit vector in unit direction direction. (For more details see DoFTools::make_periodicity_constraints(), the glossary glossary entry on periodic conditions and step-45). Second, matrix will be stored in the PeriodicFacePair collection matched_pairs for further use.

      Template Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html 2024-03-17 21:57:42.275227085 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html 2024-03-17 21:57:42.275227085 +0000 @@ -127,9 +127,9 @@

      The namespace L2 contains functions for mass matrices and L2-inner products.

      Notational conventions

      In most cases, the action of a function in this namespace can be described by a single integral. We distinguish between integrals over cells Z and over faces F. If an integral is denoted as

      -\[
+<picture><source srcset=\[
   \int_Z u \otimes v \,dx,
-\] +\]" src="form_1564.png"/>

      it will yield the following results, depending on the type of operation

      • @@ -139,7 +139,7 @@
      • If the function returns a number, then this number is the integral of the two given functions u and v.
      -

      We will use regular cursive symbols $u$ for scalars and bold symbols $\mathbf u$ for vectors. Test functions are always v and trial functions are always u. Parameters are Greek and the face normal vectors are $\mathbf n = \mathbf n_1 = -\mathbf n_2$.

      +

      We will use regular cursive symbols $u$ for scalars and bold symbols $\mathbf u$ for vectors. Test functions are always v and trial functions are always u. Parameters are Greek and the face normal vectors are $\mathbf n = \mathbf n_1 = -\mathbf n_2$.

      Signature of functions

      Functions in this namespace follow a generic signature. In the simplest case, you have two related functions

      template <int dim>
      void
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html 2024-03-17 21:57:42.315227332 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html 2024-03-17 21:57:42.319227356 +0000 @@ -271,8 +271,8 @@

      Vector-valued advection residual operator in strong form

      -\[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf u\bigr)
-\cdot\mathbf v_i \, dx. \] +\[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf u\bigr)
+\cdot\mathbf v_i \, dx. \]

      Warning
      This is not the residual consistent with cell_matrix(), but with its transpose.
      @@ -321,7 +321,7 @@

      Scalar advection residual operator in weak form

      -\[ r_i = \int_Z  (\mathbf w \cdot \nabla)v\, u_i \, dx. \] +\[ r_i = \int_Z  (\mathbf w \cdot \nabla)v\, u_i \, dx. \]

      Definition at line 216 of file advection.h.

      @@ -369,8 +369,8 @@

      Vector-valued advection residual operator in weak form

      -\[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf v\bigr)
-\cdot\mathbf u_i \, dx. \] +\[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf v\bigr)
+\cdot\mathbf u_i \, dx. \]

      Definition at line 256 of file advection.h.

      @@ -410,11 +410,11 @@
      MeshTypeA type that satisfies the requirements of the MeshType concept.
      double factor = 1.&#href_anchor"memdoc">

      Upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and zero else:

      -\[
+<picture><source srcset=\[
 a_{ij} = \int_{\partial\Omega}
 [\mathbf w\cdot\mathbf n]_+
 u_i v_j \, ds
-\] +\]" src="form_1518.png"/>

      The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

      The finite element can have several components, in which case each component is advected by the same velocity.

      @@ -468,13 +468,13 @@

      Scalar case: Residual for upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and the value of the incoming boundary condition on the inflow boundary:

      -\[
+<picture><source srcset=\[
 a_{ij} = \int_{\partial\Omega}
 (\mathbf w\cdot\mathbf n)
 \widehat u v_j \, ds
-\] +\]" src="form_1519.png"/>

      -

      Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

      +

      Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

      The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

      The finite element can have several components, in which case each component is advected by the same velocity.

      @@ -527,13 +527,13 @@

      Vector-valued case: Residual for upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and the value of the incoming boundary condition on the inflow boundary:

      -\[
+<picture><source srcset=\[
 a_{ij} = \int_{\partial\Omega}
 (\mathbf w\cdot\mathbf n)
 \widehat u v_j \, ds
-\] +\]" src="form_1519.png"/>

      -

      Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

      +

      Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

      The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

      The finite element can have several components, in which case each component is advected by the same velocity.

      @@ -599,13 +599,13 @@ const double factor = 1.&#href_anchor"memdoc">

      Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

      -\[
+<picture><source srcset=\[
 a_{ij} = \int_F \left|\mathbf w
 \cdot \mathbf n\right|
 u^\uparrow
 (v^\uparrow-v^\downarrow)
 \,ds
-\] +\]" src="form_1521.png"/>

      The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

      The finite element can have several components, in which case each component is advected the same way.

      @@ -662,13 +662,13 @@ const double factor = 1.&#href_anchor"memdoc">

      Scalar case: Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

      -\[
+<picture><source srcset=\[
 a_{ij} = \int_F \left|\mathbf w
 \cdot \mathbf n\right|
 u^\uparrow
 (v^\uparrow-v^\downarrow)
 \,ds
-\] +\]" src="form_1521.png"/>

      The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

      The finite element can have several components, in which case each component is advected the same way.

      @@ -725,13 +725,13 @@ const double factor = 1.&#href_anchor"memdoc">

      Vector-valued case: Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

      -\[
+<picture><source srcset=\[
 a_{ij} = \int_F \left|\mathbf w
 \cdot \mathbf n\right|
 u^\uparrow
 (v^\uparrow-v^\downarrow)
 \,ds
-\] +\]" src="form_1521.png"/>

      The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

      The finite element can have several components, in which case each component is advected the same way.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html 2024-03-17 21:57:42.351227554 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html 2024-03-17 21:57:42.355227579 +0000 @@ -157,7 +157,7 @@ double factor = 1.&#href_anchor"memdoc">

      Cell matrix for divergence. The derivative is on the trial function.

      -\[ \int_Z v\nabla \cdot \mathbf u \,dx \] +\[ \int_Z v\nabla \cdot \mathbf u \,dx \]

      This is the strong divergence operator and the trial space should be at least Hdiv. The test functions may be discontinuous.

      @@ -193,8 +193,8 @@ const double factor = 1.&#href_anchor"memdoc">

      The residual of the divergence operator in strong form.

      -\[ \int_Z
-v\nabla \cdot \mathbf u \,dx \] +\[ \int_Z
+v\nabla \cdot \mathbf u \,dx \]

      This is the strong divergence operator and the trial space should be at least Hdiv. The test functions may be discontinuous.

      The function cell_matrix() is the Frechet derivative of this function with respect to the test functions.

      @@ -231,8 +231,8 @@ const double factor = 1.&#href_anchor"memdoc">

      The residual of the divergence operator in weak form.

      -\[ - \int_Z
-\nabla v \cdot \mathbf u \,dx \] +\[ - \int_Z
+\nabla v \cdot \mathbf u \,dx \]

      This is the weak divergence operator and the test space should be at least H1. The trial functions may be discontinuous.

      Todo
      Verify: The function cell_matrix() is the Frechet derivative of this function with respect to the test functions.
      @@ -269,8 +269,8 @@ double factor = 1.&#href_anchor"memdoc">

      Cell matrix for gradient. The derivative is on the trial function.

      -\[
-\int_Z \nabla u \cdot \mathbf v\,dx \] +\[
+\int_Z \nabla u \cdot \mathbf v\,dx \]

      This is the strong gradient and the trial space should be at least in H1. The test functions can be discontinuous.

      @@ -306,8 +306,8 @@ const double factor = 1.&#href_anchor"memdoc">

      The residual of the gradient operator in strong form.

      -\[ \int_Z
-\mathbf v\cdot\nabla u \,dx \] +\[ \int_Z
+\mathbf v\cdot\nabla u \,dx \]

      This is the strong gradient operator and the trial space should be at least H1. The test functions may be discontinuous.

      The function gradient_matrix() is the Frechet derivative of this function with respect to the test functions.

      @@ -344,8 +344,8 @@ const double factor = 1.&#href_anchor"memdoc">

      The residual of the gradient operator in weak form.

      -\[ -\int_Z
-\nabla\cdot \mathbf v u \,dx \] +\[ -\int_Z
+\nabla\cdot \mathbf v u \,dx \]

      This is the weak gradient operator and the test space should be at least Hdiv. The trial functions may be discontinuous.

      Todo
      Verify: The function gradient_matrix() is the Frechet derivative of this function with respect to the test functions.
      @@ -382,7 +382,7 @@ double factor = 1.&#href_anchor"memdoc">

      The trace of the divergence operator, namely the product of the normal component of the vector valued trial space and the test space.

      -\[ \int_F (\mathbf u\cdot \mathbf n) v \,ds \] +\[ \int_F (\mathbf u\cdot \mathbf n) v \,ds \]

      Definition at line 259 of file divergence.h.

      @@ -422,9 +422,9 @@ double factor = 1.&#href_anchor"memdoc">

      The trace of the divergence operator, namely the product of the normal component of the vector valued trial space and the test space.

      -\[
+<picture><source srcset=\[
 \int_F (\mathbf u\cdot \mathbf n) v \,ds
-\] +\]" src="form_1529.png"/>

      Definition at line 292 of file divergence.h.

      @@ -459,9 +459,9 @@ double factor = 1.&#href_anchor"memdoc">

      The trace of the gradient operator, namely the product of the normal component of the vector valued test space and the trial space.

      -\[
+<picture><source srcset=\[
 \int_F u (\mathbf v\cdot \mathbf n) \,ds
-\] +\]" src="form_1530.png"/>

      Definition at line 324 of file divergence.h.

      @@ -521,10 +521,10 @@ double factor = 1.&#href_anchor"memdoc">

      The trace of the divergence operator, namely the product of the jump of the normal component of the vector valued trial function and the mean value of the test function.

      -\[
+<picture><source srcset=\[
 \int_F (\mathbf u_1\cdot \mathbf n_1 + \mathbf u_2 \cdot \mathbf n_2)
 \frac{v_1+v_2}{2} \,ds
-\] +\]" src="form_1531.png"/>

      Definition at line 358 of file divergence.h.

      @@ -574,12 +574,12 @@ double factor = 1.&#href_anchor"memdoc">

      The jump of the normal component

      -\[
+<picture><source srcset=\[
 \int_F
  (\mathbf u_1\cdot \mathbf n_1 + \mathbf u_2 \cdot \mathbf n_2)
  (\mathbf v_1\cdot \mathbf n_1 + \mathbf v_2 \cdot \mathbf n_2)
 \,ds
-\] +\]" src="form_1532.png"/>

      Definition at line 417 of file divergence.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 2024-03-17 21:57:42.387227776 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 2024-03-17 21:57:42.391227801 +0000 @@ -154,7 +154,7 @@

      The linear elasticity operator in weak form, namely double contraction of symmetric gradients.

      -\[ \int_Z \varepsilon(u): \varepsilon(v)\,dx \] +\[ \int_Z \varepsilon(u): \varepsilon(v)\,dx \]

      Definition at line 51 of file elasticity.h.

      @@ -197,7 +197,7 @@

      Vector-valued residual operator for linear elasticity in weak form

      -\[ - \int_Z \varepsilon(u): \varepsilon(v) \,dx \] +\[ - \int_Z \varepsilon(u): \varepsilon(v) \,dx \]

      Definition at line 84 of file elasticity.h.

      @@ -239,10 +239,10 @@

      The matrix for the weak boundary condition of Nitsche type for linear elasticity:

      -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u \cdot v - n^T \epsilon(u) v - u \epsilon(v)
 n\Bigr)\;ds.
-\] +\]" src="form_1535.png"/>

      Definition at line 123 of file elasticity.h.

      @@ -284,10 +284,10 @@

      The matrix for the weak boundary condition of Nitsche type for the tangential displacement in linear elasticity:

      -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u_\tau \cdot v_\tau - n^T \epsilon(u_\tau) v_\tau -
 u_\tau^T \epsilon(v_\tau) n\Bigr)\;ds.
-\] +\]" src="form_1536.png"/>

      Definition at line 178 of file elasticity.h.

      @@ -337,12 +337,12 @@ double factor = 1.&#href_anchor"memdoc">

      Weak boundary condition for the elasticity operator by Nitsche, namely on the face F the vector

      -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (u-g) \cdot v - n^T \epsilon(u) v - (u-g) \epsilon(v)
 n^T\Bigr)\;ds.
-\] +\]" src="form_1537.png"/>

      -

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

      +

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

      Definition at line 257 of file elasticity.h.

      @@ -398,10 +398,10 @@

      The weak boundary condition of Nitsche type for the tangential displacement in linear elasticity:

      -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (u_\tau-g_\tau) \cdot v_\tau - n^T \epsilon(u_\tau) v
 - (u_\tau-g_\tau) \epsilon(v_\tau) n\Bigr)\;ds.
-\] +\]" src="form_1539.png"/>

      Definition at line 309 of file elasticity.h.

      @@ -446,12 +446,12 @@ double factor = 1.&#href_anchor"memdoc">

      Homogeneous weak boundary condition for the elasticity operator by Nitsche, namely on the face F the vector

      -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u \cdot v - n^T \epsilon(u) v - u \epsilon(v)
 n^T\Bigr)\;ds.
-\] +\]" src="form_1540.png"/>

      -

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

      +

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

      Definition at line 387 of file elasticity.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 2024-03-17 21:57:42.423227999 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 2024-03-17 21:57:42.423227999 +0000 @@ -137,9 +137,9 @@ double factor = 1.&#href_anchor"memdoc">

      The weak form of the grad-div operator penalizing volume changes

      -\[
+<picture><source srcset=\[
  \int_Z \nabla\cdot u \nabla \cdot v \,dx
-\] +\]" src="form_1541.png"/>

      Definition at line 52 of file grad_div.h.

      @@ -174,9 +174,9 @@ const double factor = 1.&#href_anchor"memdoc">

      The weak form of the grad-div residual

      -\[
+<picture><source srcset=\[
  \int_Z \nabla\cdot u \nabla \cdot v \,dx
-\] +\]" src="form_1541.png"/>

      Definition at line 86 of file grad_div.h.

      @@ -278,7 +278,7 @@ - (\mathbf u-\mathbf g) \cdot \mathbf n \nabla \cdot v\Bigr)\;ds. \]" src="form_1543.png"/>

      -

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

      +

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

      Definition at line 174 of file grad_div.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 2024-03-17 21:57:42.447228147 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 2024-03-17 21:57:42.455228197 +0000 @@ -138,12 +138,12 @@ const double factor = 1.&#href_anchor"memdoc">

      The mass matrix for scalar or vector values finite elements.

      -\[ \int_Z
-uv\,dx \quad \text{or} \quad \int_Z \mathbf u\cdot \mathbf v\,dx \] +\[ \int_Z
+uv\,dx \quad \text{or} \quad \int_Z \mathbf u\cdot \mathbf v\,dx \]

      Likewise, this term can be used on faces, where it computes the integrals

      -\[ \int_F uv\,ds \quad \text{or} \quad \int_F \mathbf u\cdot
-\mathbf v\,ds \] +\[ \int_F uv\,ds \quad \text{or} \quad \int_F \mathbf u\cdot
+\mathbf v\,ds \]

      Parameters
      @@ -181,18 +181,18 @@
      const std::vector< double > & weights&#href_anchor"memdoc">

      The weighted mass matrix for scalar or vector values finite elements.

      -\[ \int_Z \omega(x) uv\,dx \quad \text{or} \quad \int_Z \omega(x)
-\mathbf u\cdot \mathbf v\,dx \] +\[ \int_Z \omega(x) uv\,dx \quad \text{or} \quad \int_Z \omega(x)
+\mathbf u\cdot \mathbf v\,dx \]

      Likewise, this term can be used on faces, where it computes the integrals

      -\[ \int_F \omega(x) uv\,ds \quad \text{or} \quad \int_F
-\omega(x) \mathbf u\cdot \mathbf v\,ds \] +\[ \int_F \omega(x) uv\,ds \quad \text{or} \quad \int_F
+\omega(x) \mathbf u\cdot \mathbf v\,ds \]

      Parameters
      - +
      MThe weighted mass matrix obtained as result.
      feThe FEValues object describing the local trial function space. update_values and update_JxW_values must be set.
      weightsThe weights, $\omega(x)$, evaluated at the quadrature points in the finite element (size must be equal to the number of quadrature points in the element).
      weightsThe weights, $\omega(x)$, evaluated at the quadrature points in the finite element (size must be equal to the number of quadrature points in the element).
      @@ -230,13 +230,13 @@
      const double factor = 1.&#href_anchor"memdoc">

      L2-inner product for scalar functions.

      -\[ \int_Z fv\,dx \quad \text{or} \quad \int_F fv\,ds \] +\[ \int_Z fv\,dx \quad \text{or} \quad \int_F fv\,ds \]

      Parameters
      - +
      resultThe vector obtained as result.
      feThe FEValues object describing the local trial function space. update_values and update_JxW_values must be set.
      inputThe representation of $f$ evaluated at the quadrature points in the finite element (size must be equal to the number of quadrature points in the element).
      inputThe representation of $f$ evaluated at the quadrature points in the finite element (size must be equal to the number of quadrature points in the element).
      factorA constant that multiplies the result.
      @@ -274,14 +274,14 @@
      const double factor = 1.&#href_anchor"memdoc">

      L2-inner product for a slice of a vector valued right hand side.

      -\[ \int_Z \mathbf f\cdot \mathbf v\,dx \quad \text{or}
-\quad \int_F \mathbf f\cdot \mathbf v\,ds \] +\[ \int_Z \mathbf f\cdot \mathbf v\,dx \quad \text{or}
+\quad \int_F \mathbf f\cdot \mathbf v\,ds \]

      Parameters
      - +
      resultThe vector obtained as result.
      feThe FEValues object describing the local trial function space. update_values and update_JxW_values must be set.
      inputThe vector valued representation of $\mathbf f$ evaluated at the quadrature points in the finite element (size of each component must be equal to the number of quadrature points in the element).
      inputThe vector valued representation of $\mathbf f$ evaluated at the quadrature points in the finite element (size of each component must be equal to the number of quadrature points in the element).
      factorA constant that multiplies the result.
      @@ -338,9 +338,9 @@
      const double factor2 = 1.&#href_anchor"memdoc"> -

      The jump matrix between two cells for scalar or vector values finite elements. Note that the factor $\gamma$ can be used to implement weighted jumps.

      -\[ \int_F [\gamma u][\gamma v]\,ds \quad \text{or}
-\int_F [\gamma \mathbf u]\cdot [\gamma \mathbf v]\,ds \] +

      The jump matrix between two cells for scalar or vector values finite elements. Note that the factor $\gamma$ can be used to implement weighted jumps.

      +\[ \int_F [\gamma u][\gamma v]\,ds \quad \text{or}
+\int_F [\gamma \mathbf u]\cdot [\gamma \mathbf v]\,ds \]

      Using appropriate weights, this term can be used to penalize violation of conformity in H1.

      Note that for the parameters that follow, the external matrix refers to the flux between cells, while the internal matrix refers to entries coupling inside the cell.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 2024-03-17 21:57:42.491228419 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 2024-03-17 21:57:42.495228443 +0000 @@ -152,8 +152,8 @@
      const double factor = 1.&#href_anchor"memdoc">

      Laplacian in weak form, namely on the cell Z the matrix

      -\[
-\int_Z \nu \nabla u \cdot \nabla v \, dx. \] +\[
+\int_Z \nu \nabla u \cdot \nabla v \, dx. \]

      The FiniteElement in fe may be scalar or vector valued. In the latter case, the Laplacian is applied to each component separately.

      @@ -197,7 +197,7 @@

      Laplacian residual operator in weak form

      -\[ \int_Z \nu \nabla u \cdot \nabla v \, dx. \] +\[ \int_Z \nu \nabla u \cdot \nabla v \, dx. \]

      Definition at line 92 of file laplace.h.

      @@ -240,7 +240,7 @@

      Vector-valued Laplacian residual operator in weak form

      -\[ \int_Z \nu \nabla u : \nabla v \, dx. \] +\[ \int_Z \nu \nabla u : \nabla v \, dx. \]

      Definition at line 119 of file laplace.h.

      @@ -275,11 +275,11 @@
      double factor = 1.&#href_anchor"memdoc">

      Weak boundary condition of Nitsche type for the Laplacian, namely on the face F the matrix

      -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u v - \partial_n u v - u \partial_n v\Bigr)\;ds.
-\] +\]" src="form_1557.png"/>

      -

      Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

      +

      Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

      Definition at line 157 of file laplace.h.

      @@ -313,12 +313,12 @@
      double factor = 1.&#href_anchor"memdoc">

      Weak boundary condition of Nitsche type for the Laplacian applied to the tangential component only, namely on the face F the matrix

      -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u_\tau v_\tau - \partial_n u_\tau v_\tau - u_\tau
 \partial_n v_\tau\Bigr)\;ds.
-\] +\]" src="form_1558.png"/>

      -

      Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

      +

      Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

      Definition at line 198 of file laplace.h.

      @@ -367,12 +367,12 @@
      double factor = 1.&#href_anchor"memdoc">

      Weak boundary condition for the Laplace operator by Nitsche, scalar version, namely on the face F the vector

      -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (u-g) v - \partial_n u v - (u-g) \partial_n
 v\Bigr)\;ds.
-\] +\]" src="form_1559.png"/>

      -

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

      +

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

      Definition at line 261 of file laplace.h.

      @@ -421,13 +421,13 @@
      double factor = 1.&#href_anchor"memdoc">

      Weak boundary condition for the Laplace operator by Nitsche, vector valued version, namely on the face F the vector

      -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (\mathbf u- \mathbf g) \cdot \mathbf v
 - \partial_n \mathbf u \cdot \mathbf v
 - (\mathbf u-\mathbf g) \cdot \partial_n \mathbf v\Bigr)\;ds.
-\] +\]" src="form_1560.png"/>

      -

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

      +

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

      Definition at line 308 of file laplace.h.

      @@ -486,10 +486,10 @@
      double factor2 = -1.&#href_anchor"memdoc">

      Flux for the interior penalty method for the Laplacian, namely on the face F the matrices associated with the bilinear form

      -\[
+<picture><source srcset=\[
 \int_F \Bigl( \gamma [u][v] - \{\nabla u\}[v\mathbf n] - [u\mathbf
 n]\{\nabla v\} \Bigr) \; ds.
-\] +\]" src="form_1561.png"/>

      The penalty parameter should always be the mean value of the penalties needed for stability on each side. In the case of constant coefficients, it can be computed using compute_penalty().

      If factor2 is missing or negative, the factor is assumed the same on both sides. If factors differ, note that the penalty parameter has to be computed accordingly.

      @@ -551,10 +551,10 @@
      double factor2 = -1.&#href_anchor"memdoc">

      Flux for the interior penalty method for the Laplacian applied to the tangential components of a vector field, namely on the face F the matrices associated with the bilinear form

      -\[
+<picture><source srcset=\[
 \int_F \Bigl( \gamma [u_\tau][v_\tau] - \{\nabla u_\tau\}[v_\tau\mathbf
 n] - [u_\tau\mathbf n]\{\nabla v_\tau\} \Bigr) \; ds.
-\] +\]" src="form_1562.png"/>

      Warning
      This function is still under development!
      @@ -625,10 +625,10 @@
      double ext_factor = -1.&#href_anchor"memdoc">

      Residual term for the symmetric interior penalty method:

      -\[
+<picture><source srcset=\[
 \int_F \Bigl( \gamma [u][v] - \{\nabla u\}[v\mathbf n] - [u\mathbf
 n]\{\nabla v\} \Bigr) \; ds.
-\] +\]" src="form_1561.png"/>

      Definition at line 544 of file laplace.h.

      @@ -698,11 +698,11 @@
      double ext_factor = -1.&#href_anchor"memdoc">

      Vector-valued residual term for the symmetric interior penalty method:

      -\[
+<picture><source srcset=\[
 \int_F \Bigl( \gamma [\mathbf u]\cdot[\mathbf v]
 - \{\nabla \mathbf u\}[\mathbf v\otimes \mathbf n]
 - [\mathbf u\otimes \mathbf n]\{\nabla \mathbf v\} \Bigr) \; ds.
-\] +\]" src="form_1563.png"/>

      Definition at line 611 of file laplace.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 2024-03-17 21:57:42.523228616 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 2024-03-17 21:57:42.527228641 +0000 @@ -120,22 +120,22 @@

      Local integrators related to curl operators and their traces.

      We use the following conventions for curl operators. First, in three space dimensions

      -\[
+<picture><source srcset=\[
 \nabla\times \mathbf u = \begin{pmatrix}
   \partial_2 u_3 - \partial_3 u_2 \\
   \partial_3 u_1 - \partial_1 u_3 \\
   \partial_1 u_2 - \partial_2 u_1
 \end{pmatrix}.
-\] +\]" src="form_1566.png"/>

      -

      In two space dimensions, the curl is obtained by extending a vector u to $(u_1, u_2, 0)^T$ and a scalar p to $(0,0,p)^T$. Computing the nonzero components, we obtain the scalar curl of a vector function and the vector curl of a scalar function. The current implementation exchanges the sign and we have:

      -\[
+<p>In two space dimensions, the curl is obtained by extending a vector <b>u</b> to <picture><source srcset=$(u_1, u_2, 0)^T$ and a scalar p to $(0,0,p)^T$. Computing the nonzero components, we obtain the scalar curl of a vector function and the vector curl of a scalar function. The current implementation exchanges the sign and we have:

      +\[
  \nabla \times \mathbf u = \partial_1 u_2 - \partial_2 u_1,
  \qquad
  \nabla \times p = \begin{pmatrix}
    \partial_2 p \\ -\partial_1 p
  \end{pmatrix}
-\] +\]" src="form_1569.png"/>

      Function Documentation

      @@ -161,7 +161,7 @@
      const Tensor< 2, dim > & h2&#href_anchor"memdoc">

      Auxiliary function. Given the tensors of dim second derivatives, compute the curl of the curl of a vector function. The result in two and three dimensions is:

      -\[
+<picture><source srcset=\[
 \nabla\times\nabla\times \mathbf u = \begin{pmatrix}
 \partial_1\partial_2 u_2 - \partial_2^2 u_1 \\
 \partial_1\partial_2 u_1 - \partial_1^2 u_2
@@ -175,7 +175,7 @@
 \partial_3\partial_1 u_1 + \partial_3\partial_2 u_2
 - (\partial_1^2+\partial_2^2) u_3
 \end{pmatrix}
-\] +\]" src="form_1570.png"/>

      Note
      The third tensor argument is not used in two dimensions and can for instance duplicate one of the previous.
      @@ -211,9 +211,9 @@
      const Tensor< 1, dim > & normal&#href_anchor"memdoc">

      Auxiliary function. Given dim tensors of first derivatives and a normal vector, compute the tangential curl

      -\[
+<picture><source srcset=\[
 \mathbf n \times \nabla \times u.
-\] +\]" src="form_1571.png"/>

      Note
      The third tensor argument is not used in two dimensions and can for instance duplicate one of the previous.
      @@ -244,10 +244,10 @@
      const double factor = 1.&#href_anchor"memdoc">

      The curl-curl operator

      -\[
+<picture><source srcset=\[
 \int_Z \nabla\times u \cdot
 \nabla \times v \,dx
-\] +\]" src="form_1572.png"/>

      in weak form.

      @@ -283,9 +283,9 @@
      double factor = 1.&#href_anchor"memdoc">

      The matrix for the curl operator

      -\[
+<picture><source srcset=\[
 \int_Z \nabla \times u \cdot v \,dx.
-\] +\]" src="form_1573.png"/>

      This is the standard curl operator in 3d and the scalar curl in 2d. The vector curl operator can be obtained by exchanging test and trial functions.

      @@ -327,14 +327,14 @@
      double factor = 1.&#href_anchor"memdoc">

      The matrix for weak boundary condition of Nitsche type for the tangential component in Maxwell systems.

      -\[
+<picture><source srcset=\[
 \int_F \biggl( 2\gamma
 (u\times n) (v\times n) -
 (u\times n)(\nu \nabla\times
 v) - (v\times
 n)(\nu \nabla\times u)
 \biggr)
-\] +\]" src="form_1574.png"/>

      Definition at line 265 of file maxwell.h.

      @@ -364,10 +364,10 @@
      double factor = 1.&#href_anchor"memdoc">

      The product of two tangential traces,

      -\[
+<picture><source srcset=\[
 \int_F (u\times n)(v\times n)
 \, ds.
-\] +\]" src="form_1575.png"/>

      Definition at line 328 of file maxwell.h.

      @@ -435,14 +435,14 @@

      The interior penalty fluxes for Maxwell systems.

      -\[
+<picture><source srcset=\[
 \int_F \biggl( \gamma
 \{u\times n\}\{v\times n\} -
 \{u\times n\}\{\nu \nabla\times
 v\}- \{v\times
 n\}\{\nu \nabla\times u\}
 \biggr)\;dx
-\] +\]" src="form_1576.png"/>

      Definition at line 385 of file maxwell.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceMatrixCreator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceMatrixCreator.html 2024-03-17 21:57:42.575228938 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceMatrixCreator.html 2024-03-17 21:57:42.579228963 +0000 @@ -150,7 +150,7 @@

      At present there are functions to create the following matrices:

      • create_mass_matrix: create the matrix with entries $m_{ij} =
-\int_\Omega \phi_i(x) \phi_j(x) dx$ by numerical quadrature. Here, the $\phi_i$ are the basis functions of the finite element space given.

        +\int_\Omega \phi_i(x) \phi_j(x) dx$" src="form_2206.png"/> by numerical quadrature. Here, the $\phi_i$ are the basis functions of the finite element space given.

        A coefficient may be given to evaluate $m_{ij} = \int_\Omega a(x) \phi_i(x)
 \phi_j(x) dx$ instead.

        @@ -167,7 +167,7 @@

        If the finite element for which the mass matrix or the Laplace matrix is to be built has more than one component, the functions accept a single coefficient as well as a vector valued coefficient function. For the latter case, the number of components must coincide with the number of components of the system finite element.

        Matrices on the boundary

        The create_boundary_mass_matrix() creates the matrix with entries $m_{ij} =
-\int_{\Gamma} \phi_i \phi_j dx$, where $\Gamma$ is the union of boundary parts with indicators contained in a std::map<types::boundary_id, const Function<spacedim,number>*> passed to the function (i.e. if you want to set up the mass matrix for the parts of the boundary with indicators zero and 2, you pass the function a map with key type types::boundary_id as the parameter boundary_functions containing the keys zero and 2). The size of the matrix is equal to the number of degrees of freedom that have support on the boundary, i.e. it is not a matrix on all degrees of freedom, but only a subset. (The $\phi_i$ in the formula are the subset of basis functions which have at least part of their support on $\Gamma$.) In order to determine which shape functions are to be considered, and in order to determine in which order, the function takes a dof_to_boundary_mapping; this object maps global DoF numbers to a numbering of the degrees of freedom located on the boundary, and can be obtained using the function DoFTools::map_dof_to_boundary_indices().

        +\int_{\Gamma} \phi_i \phi_j dx$" src="form_2210.png"/>, where $\Gamma$ is the union of boundary parts with indicators contained in a std::map<types::boundary_id, const Function<spacedim,number>*> passed to the function (i.e. if you want to set up the mass matrix for the parts of the boundary with indicators zero and 2, you pass the function a map with key type types::boundary_id as the parameter boundary_functions containing the keys zero and 2). The size of the matrix is equal to the number of degrees of freedom that have support on the boundary, i.e. it is not a matrix on all degrees of freedom, but only a subset. (The $\phi_i$ in the formula are the subset of basis functions which have at least part of their support on $\Gamma$.) In order to determine which shape functions are to be considered, and in order to determine in which order, the function takes a dof_to_boundary_mapping; this object maps global DoF numbers to a numbering of the degrees of freedom located on the boundary, and can be obtained using the function DoFTools::map_dof_to_boundary_indices().

        In order to work, the function needs a matrix of the correct size, built on top of a corresponding sparsity pattern. Since we only work on a subset of the degrees of freedom, we can't use the matrices and sparsity patterns that are created for the entire set of degrees of freedom. Rather, you should use the DoFHandler::make_boundary_sparsity_pattern() function to create the correct sparsity pattern, and build a matrix on top of it.

        Note that at present there is no function that computes the mass matrix for all shape functions, though such a function would be trivial to implement.

        Right hand sides

        /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 2024-03-17 21:57:42.611229160 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 2024-03-17 21:57:42.619229209 +0000 @@ -162,8 +162,8 @@
      -

      Type describing how a cell or a face is located relative to the zero contour of a level set function, $\psi$. The values of the type correspond to:

      -

      inside if $\psi(x) < 0$, outside if $\psi(x) > 0$, intersected if $\psi(x)$ varies in sign,

      +

      Type describing how a cell or a face is located relative to the zero contour of a level set function, $\psi$. The values of the type correspond to:

      +

      inside if $\psi(x) < 0$, outside if $\psi(x) > 0$, intersected if $\psi(x)$ varies in sign,

      over the cell/face. The value "unassigned" is used to describe that the location of a cell/face has not yet been determined.

      @@ -229,17 +229,17 @@
      Enumerator
      inside 
      const AffineConstraints< number > & immersed_constraints = AffineConstraints<number>()&#href_anchor"memdoc">

      Create a coupling sparsity pattern for non-matching, overlapping grids.

      -

      Given two non-matching triangulations, representing the domains $\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) =
-\text{span}\{w_j\}_{j=0}^m$, compute the sparsity pattern that would be necessary to assemble the matrix

      -\[
+<p>Given two non-matching triangulations, representing the domains <picture><source srcset=$\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) =
+\text{span}\{w_j\}_{j=0}^m$, compute the sparsity pattern that would be necessary to assemble the matrix

      +\[
 M_{ij} \dealcoloneq \int_{B} v_i(x) w_j(x) dx,
                     \quad i \in [0,n), j \in [0,m),
-\] +\]" src="form_2012.png"/>

      -

      where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

      -

      The sparsity is filled by locating the position of quadrature points (obtained by the reference quadrature quad) defined on elements of $B$ with respect to the embedding triangulation $\Omega$. For each overlapping cell, the entries corresponding to space_comps in space_dh and immersed_comps in immersed_dh are added to the sparsity pattern.

      +

      where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

      +

      The sparsity is filled by locating the position of quadrature points (obtained by the reference quadrature quad) defined on elements of $B$ with respect to the embedding triangulation $\Omega$. For each overlapping cell, the entries corresponding to space_comps in space_dh and immersed_comps in immersed_dh are added to the sparsity pattern.

      The space_comps and immersed_comps masks are assumed to be ordered in the same way: the first component of space_comps will couple with the first component of immersed_comps, the second with the second, and so on. If one of the two masks has more non-zero than the other, then the excess components will be ignored.

      -

      If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sens for dim1 lower or equal than dim0. A static assert guards that this is actually the case.

      +

      If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sens for dim1 lower or equal than dim0. A static assert guards that this is actually the case.

      For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

      This function will also work in parallel, provided that the immersed triangulation is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if you use an immersed parallel::distributed::Triangulation<dim1,spacedim>.

      See the tutorial program step-60 for an example on how to use this function.

      @@ -357,17 +357,17 @@
      const AffineConstraints< typename Matrix::value_type > & immersed_constraints = AffineConstraints<typename&#href_anchor"memdoc">

      Create a coupling mass matrix for non-matching, overlapping grids.

      -

      Given two non-matching triangulations, representing the domains $\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) =
-\text{span}\{w_j\}_{j=0}^m$, compute the coupling matrix

      -\[
+<p>Given two non-matching triangulations, representing the domains <picture><source srcset=$\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) =
+\text{span}\{w_j\}_{j=0}^m$, compute the coupling matrix

      +\[
 M_{ij} \dealcoloneq \int_{B} v_i(x) w_j(x) dx,
                     \quad i \in [0,n), j \in [0,m),
-\] +\]" src="form_2012.png"/>

      -

      where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

      -

      The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern function. The elements of the matrix are computed by locating the position of quadrature points defined on elements of $B$ with respect to the embedding triangulation $\Omega$.

      +

      where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

      +

      The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern function. The elements of the matrix are computed by locating the position of quadrature points defined on elements of $B$ with respect to the embedding triangulation $\Omega$.

      The space_comps and immersed_comps masks are assumed to be ordered in the same way: the first component of space_comps will couple with the first component of immersed_comps, the second with the second, and so on. If one of the two masks has more non-zero entries non-zero than the other, then the excess components will be ignored.

      -

      If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sense for dim1 lower or equal than dim0. A static assert guards that this is actually the case.

      +

      If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sense for dim1 lower or equal than dim0. A static assert guards that this is actually the case.

      For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

      This function will also work in parallel, provided that the immersed triangulation is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if you use an immersed parallel::distributed::Triangulation<dim1,spacedim>.

      See the tutorial program step-60 for an example on how to use this function.

      @@ -491,16 +491,16 @@
      const ComponentMask & comps1 = ComponentMask()&#href_anchor"memdoc">

      Create a coupling sparsity pattern for non-matching independent grids, using a convolution kernel with compact support of radius epsilon.

      -

      Given two non-matching triangulations, representing the domains $\Omega^0$ and $\Omega^1$, both embedded in $\mathbb{R}^d$, and two finite element spaces $V^0(\Omega^0) = \text{span}\{v_i\}_{i=0}^n$ and $V^1(\Omega^1) =
-\text{span}\{w_\alpha\}_{\alpha=0}^m$, compute the sparsity pattern that would be necessary to assemble the matrix

      +

      Given two non-matching triangulations, representing the domains $\Omega^0$ and $\Omega^1$, both embedded in $\mathbb{R}^d$, and two finite element spaces $V^0(\Omega^0) = \text{span}\{v_i\}_{i=0}^n$ and $V^1(\Omega^1) =
+\text{span}\{w_\alpha\}_{\alpha=0}^m$, compute the sparsity pattern that would be necessary to assemble the matrix

      -\[
+<picture><source srcset=\[
 M_{i\alpha} \dealcoloneq \int_{\Omega^0} \int_{\Omega^1}
 v_i(x) K^{\epsilon}(x-y) w_\alpha(y) dx \ dy,
 \quad i \in [0,n), \alpha \in [0,m),
-\] +\]" src="form_2020.png"/>

      -

      where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

      +

      where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

      The comps0 and comps1 masks are assumed to be ordered in the same way: the first component of comps0 will couple with the first component of comps1, the second with the second, and so on. If one of the two masks has more active components than the other, then the excess components will be ignored.

      For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

      This function will also work in parallel, provided that at least one of the triangulations is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if both triagnulations are of type parallel::distributed::Triangulation<dim1,spacedim>.

      @@ -575,15 +575,15 @@
      const ComponentMask & comps1 = ComponentMask()&#href_anchor"memdoc">

      Create a coupling mass matrix for non-matching independent grids, using a convolution kernel with compact support.

      -

      Given two non-matching triangulations, representing the domains $\Omega^0$ and $\Omega^1$, both embedded in $\mathbb{R}^d$, and two finite element spaces $V^0(\Omega^0) = \text{span}\{v_i\}_{i=0}^n$ and $V^1(\Omega^1) = \text{span}\{w_\alpha\}_{\alpha=0}^m$, compute the matrix

      +

      Given two non-matching triangulations, representing the domains $\Omega^0$ and $\Omega^1$, both embedded in $\mathbb{R}^d$, and two finite element spaces $V^0(\Omega^0) = \text{span}\{v_i\}_{i=0}^n$ and $V^1(\Omega^1) = \text{span}\{w_\alpha\}_{\alpha=0}^m$, compute the matrix

      -\[
+<picture><source srcset=\[
 M_{i\alpha} \dealcoloneq \int_{\Omega^0} \int_{\Omega^1}
 v_i(x) K^{\epsilon}(x-y) w_\alpha(y) dx \ dy,
 \quad i \in [0,n), \alpha \in [0,m),
-\] +\]" src="form_2020.png"/>

      -

      where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

      +

      where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

      The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern() function.

      The comps0 and comps1 masks are assumed to be ordered in the same way: the first component of comps0 will couple with the first component of comps1, the second with the second, and so on. If one of the two masks has more active components than the other, then the excess components will be ignored.

      For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 2024-03-17 21:57:42.655229432 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 2024-03-17 21:57:42.655229432 +0000 @@ -279,7 +279,7 @@
      -

      Returns the max/min bounds on the value, taken over all the entries in the incoming vector of FunctionBounds. That is, given the incoming function bounds, $[L_j, U_j]$, this function returns $[L, U]$, where $L = \min_{j} L_j$ and $U = \max_{j} U_j$.

      +

      Returns the max/min bounds on the value, taken over all the entries in the incoming vector of FunctionBounds. That is, given the incoming function bounds, $[L_j, U_j]$, this function returns $[L, U]$, where $L = \min_{j} L_j$ and $U = \max_{j} U_j$.

      Definition at line 201 of file quadrature_generator.cc.

      @@ -301,21 +301,21 @@
      -

      Finds the best choice of height function direction, given the FunctionBounds for a number of functions $\{\psi_j\}_{j=0}^{n-1}$. Here, "best" is meant in the sense of the implicit function theorem.

      -

      Let $J_I$ be the index set of the indefinite functions:

      -

      $J_I = \{0,..., n - 1\} \setminus \{ j : |\psi_j| > 0 \}$.

      -

      This function converts the incoming bounds to a lower bound, $L_{ij}$, on the absolute value of each component of the gradient:

      -

      $|\partial_k \psi_j| > L_{jk}$.

      -

      and then returns a coordinate direction, $i$, and a lower bound $L$, such that

      +

      Finds the best choice of height function direction, given the FunctionBounds for a number of functions $\{\psi_j\}_{j=0}^{n-1}$. Here, "best" is meant in the sense of the implicit function theorem.

      +

      Let $J_I$ be the index set of the indefinite functions:

      +

      $J_I = \{0,..., n - 1\} \setminus \{ j : |\psi_j| > 0 \}$.

      +

      This function converts the incoming bounds to a lower bound, $L_{ij}$, on the absolute value of each component of the gradient:

      +

      $|\partial_k \psi_j| > L_{jk}$.

      +

      and then returns a coordinate direction, $i$, and a lower bound $L$, such that

      -\[
+<picture><source srcset=\[
 i = \arg \max_{k} \min_{j \in J_I} L_{jk}, \\
 L =      \max_{k} \min_{j \in J_I} L_{jk}.
-\] +\]" src="form_2134.png"/>

      -

      This means $i$ is a coordinate direction such that all functions intersected by the zero contour (i.e. those belonging to $J_I$) fulfill

      -

      $|\partial_i \psi_j| > L$.

      -

      Note that the estimated lower bound, $L$, can be zero or negative. This means that no suitable height function direction exists. If all of the incoming functions are positive or negative definite the returned std::optional is non-set.

      +

      This means $i$ is a coordinate direction such that all functions intersected by the zero contour (i.e. those belonging to $J_I$) fulfill

      +

      $|\partial_i \psi_j| > L$.

      +

      Note that the estimated lower bound, $L$, can be zero or negative. This means that no suitable height function direction exists. If all of the incoming functions are positive or negative definite the returned std::optional is non-set.

      Definition at line 275 of file quadrature_generator.cc.

      @@ -386,7 +386,7 @@ std::pair< double, double > & value_bounds&#href_anchor"memdoc"> -

      Given the incoming lower and upper bounds on the value of a function $[L, U]$, return the minimum/maximum of $[L, U]$ and the function values at the vertices. That is, this function returns

      +

      Given the incoming lower and upper bounds on the value of a function $[L, U]$, return the minimum/maximum of $[L, U]$ and the function values at the vertices. That is, this function returns

      $[\min(L, L_f), \max(U, U_f)]$,

      where $L_f = \min_{v} f(x_v)$, $U_f = \max_{v} f(x_v)|$, and $x_v$ is a vertex.

      It is assumed that the incoming function is scalar valued.

      @@ -474,7 +474,7 @@
      -

      Return a lower bound, $L_a$, on the absolute value of a function, $f(x)$:

      +

      Return a lower bound, $L_a$, on the absolute value of a function, $f(x)$:

      $L_a \leq |f(x)|$,

      by estimating it from the incoming lower and upper bounds: $L \leq f(x) \leq U$.

      By rewriting the lower and upper bounds as $F - C \leq f(x) \leq F + C$, where $L = F - C$, $U = F + C$ (or $F = (U + L)/2$, $C = (U - L)/2$), we get $|f(x) - F| \leq C$. Using the inverse triangle inequality gives $|F| - |f(x)| \leq |f(x) - F| \leq C$. Thus, $L_a = |F| - C$.

      @@ -663,7 +663,7 @@ QPartitioning< dim > & q_partitioning&#href_anchor"memdoc"> -

      Let $\{ y_0, ..., y_{n+1} \}$ be such that $[y_0, y_{n+1}]$ is the interval and $\{ y_1, ..., y_n \}$ are the roots. In each subinterval, $[y_i, y_{i+1}]$, distribute point according to the 1D-quadrature rule $\{(x_q, w_q)\}_q$ (quadrature1D). Take the tensor product with the quadrature point $(x, w)$ (point, weight) to create dim-dimensional quadrature points

      +

      Let $\{ y_0, ..., y_{n+1} \}$ be such that $[y_0, y_{n+1}]$ is the interval and $\{ y_1, ..., y_n \}$ are the roots. In each subinterval, $[y_i, y_{i+1}]$, distribute point according to the 1D-quadrature rule $\{(x_q, w_q)\}_q$ (quadrature1D). Take the tensor product with the quadrature point $(x, w)$ (point, weight) to create dim-dimensional quadrature points

      \[
 X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q),
 W_q = w_I (y_{i+1} - y_i) w_q,
@@ -748,7 +748,7 @@
           <td></td>
           <td class=const std_cxx17::optional< HeightDirectionData > & height_direction_data&#href_anchor"memdoc">

      Return the coordinate direction that the box should be split in, assuming that the box should be split it half.

      -

      If the box is larger in one coordante direction, this direction is returned. If the box have the same extent in all directions, we choose the coordinate direction which is closest to being a height-function direction. That is, the direction $i$ that has a least negative estimate of $|\partial_i \psi_j|$. As a last resort, we choose the direction 0, if height_direction_data non-set.

      +

      If the box is larger in one coordante direction, this direction is returned. If the box have the same extent in all directions, we choose the coordinate direction which is closest to being a height-function direction. That is, the direction $i$ that has a least negative estimate of $|\partial_i \psi_j|$. As a last resort, we choose the direction 0, if height_direction_data non-set.

      Definition at line 995 of file quadrature_generator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html 2024-03-17 21:57:42.683229605 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html 2024-03-17 21:57:42.687229629 +0000 @@ -141,21 +141,21 @@ const ComponentMask & space_comps = ComponentMask()&#href_anchor"memdoc">

      Create an interpolation sparsity pattern for particles.

      -

      Given a triangulation representing the domain $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the sparsity pattern that would be necessary to assemble the matrix

      -\[
+<p>Given a triangulation representing the domain <picture><source srcset=$\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the sparsity pattern that would be necessary to assemble the matrix

      +\[
 M_{i,j} \dealcoloneq v_j(x_i) ,
-\] +\]" src="form_2424.png"/>

      -

      where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

      +

      where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

      In the case of vector valued finite element spaces, the components on which interpolation must be performed can be selected using a component mask. Only primitive finite element spaces are supported.

      When selecting more than one component, the resulting sparsity will have dimension equal to particle_handler.n_global_particles() * mask.n_selected_components() times space_dh.n_dofs(), and the corresponding matrix entries are given by

      -\[
+<picture><source srcset=\[
  M_{(i*n_comps+k),j} \dealcoloneq v_j(x_i) \cdot e_{comp_j},
-\] +\]" src="form_2425.png"/>

      -

      where comp_j is the only non zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

      -

      The sparsity is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

      -

      If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the sparsity will be empty.

      +

      where comp_j is the only non zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

      +

      The sparsity is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

      +

      If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the sparsity will be empty.

      Constraints of the form supported by the AffineConstraints class may be supplied with the constraints argument. The method AffineConstraints::add_entries_local_to_global() is used to fill the final sparsity pattern.

      Definition at line 32 of file utilities.cc.

      @@ -192,21 +192,21 @@ const ComponentMask & space_comps = ComponentMask()&#href_anchor"memdoc">

      Create an interpolation matrix for particles.

      -

      Given a triangulation representing the domains $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the matrix

      +

      Given a triangulation representing the domains $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the matrix

      \[
 M_{ij} \dealcoloneq v_j(x_i) ,
 \]

      -

      where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

      +

      where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

      In the case of vector valued finite element spaces, the components on which interpolation must be performed can be selected using a component mask. Only primitive finite element spaces are supported.

      When selecting more than one component, the resulting sparsity will have dimension equal to particle_handler.n_global_particles() * mask.n_selected_components() times space_dh.n_dofs(), and the corresponding matrix entries are given by

      -\[
+<picture><source srcset=\[
  M_{(i*n_comps+k),j} \dealcoloneq v_j(x_i) \cdot e_{comp_j},
-\] +\]" src="form_2425.png"/>

      -

      where comp_j is the only non zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

      -

      The matrix is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

      -

      If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the matrix will be zero.

      +

      where comp_j is the only non zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

      +

      The matrix is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

      +

      If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the matrix will be zero.

      Constraints of the form supported by the AffineConstraints class may be supplied with the constraints argument. The method AffineConstraints::distribute_local_to_global() is used to distribute the entries of the matrix to respect the given constraints.

      Definition at line 114 of file utilities.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Elasticity_1_1Kinematics.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Elasticity_1_1Kinematics.html 2024-03-17 21:57:42.715229802 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Elasticity_1_1Kinematics.html 2024-03-17 21:57:42.723229852 +0000 @@ -149,14 +149,14 @@

      Return the deformation gradient tensor, as constructed from the material displacement gradient tensor Grad_u. The result is expressed as

      -\[
+<picture><source srcset=\[
  \mathbf{F}
    \dealcoloneq \nabla_{0} \boldsymbol{\varphi}
    \left( \mathbf{X} \right)
     =\mathbf{I} + \nabla_{0}\mathbf{u}
-\] +\]" src="form_2428.png"/>

      -

      where $\mathbf{u} = \mathbf{u}\left(\mathbf{X}\right)$ is the displacement at position $\mathbf{X}$ in the referential configuration. The differential operator $\nabla_{0}$ is defined as $\frac{\partial}{\partial \mathbf{X}}$.

      +

      where $\mathbf{u} = \mathbf{u}\left(\mathbf{X}\right)$ is the displacement at position $\mathbf{X}$ in the referential configuration. The differential operator $\nabla_{0}$ is defined as $\frac{\partial}{\partial \mathbf{X}}$.

      Note
      For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.14) on p. 23 (or thereabouts).
      For a discussion of the background of this function, see G. A. Holzapfel: "Nonlinear solid mechanics. A Continuum Approach for Engineering" (2007), and in particular formula (2.39) on p. 71 (or thereabouts).
      @@ -180,11 +180,11 @@

      Return the isochoric counterpart of the deformation gradient tensor F . The result is expressed as

      -\[
+<picture><source srcset=\[
  \mathbf{F}^{\text{iso}} \dealcoloneq J^{-1/\textrm{dim}} \mathbf{F}
-\] +\]" src="form_2430.png"/>

      -

      where $J = \text{det}\left(\mathbf{F}\right)$.

      +

      where $J = \text{det}\left(\mathbf{F}\right)$.

      Note
      For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.28) on p. 29 (or thereabouts).
      For a discussion of the background of this function, see G. A. Holzapfel: "Nonlinear solid mechanics. A Continuum Approach for Engineering" (2007), and in particular formula (6.79) on p. 228 (or thereabouts).
      @@ -208,11 +208,11 @@

      Return the volumetric counterpart of the deformation gradient tensor F . The result is expressed as

      -\[
+<picture><source srcset=\[
  \mathbf{F}^{\text{vol}} \dealcoloneq J^{1/\textrm{dim}} \mathbf{I}
-\] +\]" src="form_2432.png"/>

      -

      where $J = \text{det}\left(\mathbf{F}\right)$.

      +

      where $J = \text{det}\left(\mathbf{F}\right)$.

      Note
      For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.28) on p. 29 (or thereabouts).
      For a discussion of the background of this function, see G. A. Holzapfel: "Nonlinear solid mechanics. A Continuum Approach for Engineering" (2007), and in particular formula (6.79) on p. 228 (or thereabouts).
      @@ -236,9 +236,9 @@

      Return the symmetric right Cauchy-Green deformation tensor, as constructed from the deformation gradient tensor F. The result is expressed as

      -\[
+<picture><source srcset=\[
  \mathbf{C} \dealcoloneq \mathbf{F}^{T}\cdot\mathbf{F} \, .
-\] +\]" src="form_2433.png"/>

      Note
      For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.15) on p. 23 (or thereabouts).
      @@ -263,9 +263,9 @@

      Return the symmetric left Cauchy-Green deformation tensor, as constructed from the deformation gradient tensor F. The result is expressed as

      -\[
+<picture><source srcset=\[
  \mathbf{b} \dealcoloneq \mathbf{F}\cdot\mathbf{F}^{T} \, .
-\] +\]" src="form_2434.png"/>

      Note
      For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.25) on p. 28 (or thereabouts).
      @@ -290,10 +290,10 @@

      Return the symmetric Green-Lagrange strain tensor, as constructed from the deformation gradient tensor F. The result is expressed as

      -\[
+<picture><source srcset=\[
  \mathbf{E} \dealcoloneq \frac{1}{2}
  \left[ \mathbf{F}^{T}\cdot\mathbf{F} - \mathbf{I} \right] \, .
-\] +\]" src="form_2435.png"/>

      Note
      For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.15) on p. 23 (or thereabouts).
      @@ -318,12 +318,12 @@

      Return the symmetric small strain tensor, as constructed from the displacement gradient tensor Grad_u. The result is expressed as

      -\[
+<picture><source srcset=\[
  \boldsymbol{\varepsilon} \dealcoloneq \frac{1}{2}
  \left[ \nabla_{0}\mathbf{u} + [\nabla_{0}\mathbf{u}]^{T} \right] \, .
-\] +\]" src="form_2436.png"/>

      -

      where $\mathbf{u} = \mathbf{u}(\mathbf{X})$ is the displacement at position $\mathbf{X}$ in the referential configuration. The differential operator $\nabla_{0}$ is defined as $\frac{\partial}{\partial \mathbf{X}}$.

      +

      where $\mathbf{u} = \mathbf{u}(\mathbf{X})$ is the displacement at position $\mathbf{X}$ in the referential configuration. The differential operator $\nabla_{0}$ is defined as $\frac{\partial}{\partial \mathbf{X}}$.

      Note
      For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.17) on p. 24 (or thereabouts).
      @@ -345,10 +345,10 @@

      Return the symmetric Almansi strain tensor, as constructed from the deformation gradient tensor F. The result is expressed as

      -\[
+<picture><source srcset=\[
  \mathbf{e} \dealcoloneq \frac{1}{2} \left[ \mathbf{I}
   - \mathbf{F}^{-T}\cdot\mathbf{F}^{-1} \right] \, .
-\] +\]" src="form_2438.png"/>

      Note
      For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.35) on p. 30 (or thereabouts).
      @@ -374,9 +374,9 @@ const Tensor< 2, dim, Number > & dF_dt&#href_anchor"memdoc">

      Return the spatial velocity gradient tensor, as constructed from the deformation gradient tensor F and its material time derivative dF_dt (the material velocity gradient). The result is expressed as

      -\[
+<picture><source srcset=\[
  \mathbf{l} \dealcoloneq \dot{\mathbf{F}}\cdot\mathbf{F}^{-1} \, .
-\] +\]" src="form_2439.png"/>

      Note
      For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.47) on p. 32 (or thereabouts).
      @@ -402,15 +402,15 @@ const Tensor< 2, dim, Number > & dF_dt&#href_anchor"memdoc">

      Return the rate of deformation tensor (also known as the rate of strain tensor), as constructed from the deformation gradient tensor F and its material time derivative dF_dt (the material velocity gradient). The result is expressed as

      -\[
+<picture><source srcset=\[
  \mathbf{d} \dealcoloneq \frac{1}{2}
  \left[ \mathbf{l} + \mathbf{l}^{T} \right]
-\] +\]" src="form_2440.png"/>

      where

      -\[
+<picture><source srcset=\[
  \mathbf{l} = \dot{\mathbf{F}}\cdot\mathbf{F}^{-1}
-\] +\]" src="form_2441.png"/>

      is the spatial velocity gradient tensor.

      Note
      For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.49) on p. 32 (or thereabouts).
      @@ -437,15 +437,15 @@ const Tensor< 2, dim, Number > & dF_dt&#href_anchor"memdoc">

      Return the rate of rotation tensor (also known as the vorticity tensor), as constructed from the deformation gradient tensor F and its material time derivative dF_dt (the material velocity gradient). The result is expressed as

      -\[
+<picture><source srcset=\[
  \mathbf{w} \dealcoloneq \frac{1}{2}
  \left[ \mathbf{l} - \mathbf{l}^{T} \right]
-\] +\]" src="form_2442.png"/>

      where

      -\[
+<picture><source srcset=\[
  \mathbf{l} = \dot{\mathbf{F}}\cdot\mathbf{F}^{-1}
-\] +\]" src="form_2441.png"/>

      is the spatial velocity gradient tensor.

      Note
      For a discussion of the background of this function, see G. A. Holzapfel: "Nonlinear solid mechanics. A Continuum Approach for Engineering" (2007), and in particular formula (2.149) on p. 97 (or thereabouts).
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 2024-03-17 21:57:42.771230148 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 2024-03-17 21:57:42.771230148 +0000 @@ -168,8 +168,8 @@ TensorType to_tensor (const FullMatrix< Number > &vec) &#href_anchor"details" id="details">

      Detailed Description

      A namespace with functions that assist in the conversion of vectors and tensors to and from a compressed format using Kelvin notation and weighting.

      -

      Both Kelvin and Voigt notation adopt the same indexing convention. With specific reference to the spatial dimension 3 case, for a rank-2 symmetric tensor $\mathbf{S}$ we enumerate its tensor components

      -\[
+<p>Both <a class=Kelvin and Voigt notation adopt the same indexing convention. With specific reference to the spatial dimension 3 case, for a rank-2 symmetric tensor $\mathbf{S}$ we enumerate its tensor components

      +\[
 \mathbf{S} \dealcoloneq \left[ \begin{array}{ccc}
  S_{00}          & S_{01}          & S_{02} \\
  S_{10} = S_{01} & S_{11}          & S_{12} \\
@@ -181,10 +181,10 @@
  sym   & n = 1 & n = 3 \\
  sym   & sym   & n = 2
 \end{array} \right] ,
-\] +\]" src="form_2465.png"/>

      -

      where $n$ denotes the Kelvin index for the tensor component, while for a general rank-2 tensor $\mathbf{T}$

      -\[
+<p> where <picture><source srcset=$n$ denotes the Kelvin index for the tensor component, while for a general rank-2 tensor $\mathbf{T}$

      +\[
 \mathbf{T} \dealcoloneq \left[ \begin{array}{ccc}
  T_{00} & T_{01} & T_{02} \\
  T_{10} & T_{11} & T_{12} \\
@@ -196,10 +196,10 @@
  n = 6 & n = 1 & n = 3 \\
  n = 7 & n = 8 & n = 2
 \end{array}\right] ,
-\] +\]" src="form_2467.png"/>

      -

      and for a rank-1 tensor $\mathbf{v}$

      -\[
+<p> and for a rank-1 tensor <picture><source srcset=$\mathbf{v}$

      +\[
 \mathbf{v} \dealcoloneq \left[ \begin{array}{c}
  v_{0} \\ v_{1} \\ v_{2}
 \end{array}\right]
@@ -207,7 +207,7 @@
 \left[ \begin{array}{c}
  n = 0 \\ n = 1 \\ n = 2
 \end{array}\right] .
-\] +\]" src="form_2469.png"/>

      To summarize, the relationship between tensor and Kelvin indices for both the three-dimensional case and the analogously discerned two-dimensional case outlined in the following table:

      @@ -249,23 +249,23 @@
      -

      To illustrate the purpose of this notation, consider the rank-2 symmetric tensors $\mathbf{S}$ and $\mathbf{E}$ that are related to one another by $\mathbf{S} = \cal{C} : \mathbf{E}$, where the operator $\cal{C}$ is a fourth-order symmetric tensor. As opposed to the commonly used Voigt notation, Kelvin (or Mandel) notation keeps the same definition of the inner product $\mathbf{S} : \mathbf{E}$ when both $\mathbf{S}$ and $\mathbf{E}$ are symmetric. In general, the inner product of all symmetric and general tensors remain the same regardless of the notation with which it is represented.

      +

      To illustrate the purpose of this notation, consider the rank-2 symmetric tensors $\mathbf{S}$ and $\mathbf{E}$ that are related to one another by $\mathbf{S} = \cal{C} : \mathbf{E}$, where the operator $\cal{C}$ is a fourth-order symmetric tensor. As opposed to the commonly used Voigt notation, Kelvin (or Mandel) notation keeps the same definition of the inner product $\mathbf{S} : \mathbf{E}$ when both $\mathbf{S}$ and $\mathbf{E}$ are symmetric. In general, the inner product of all symmetric and general tensors remain the same regardless of the notation with which it is represented.

      To achieve these two properties, namely that

      -\[
+<picture><source srcset=\[
 \mathbf{S} = \cal{C} : \mathbf{E}
 \quad \Rightarrow   \quad
 \tilde{\mathbf{S}} = \tilde{\cal{C}} \; \tilde{\mathbf{E}}
-\] +\]" src="form_2474.png"/>

      and

      -\[
+<picture><source srcset=\[
 \mathbf{S} : \mathbf{E}
 \, \equiv \,
 \tilde{\mathbf{S}} \cdot \tilde{\mathbf{E}} ,
-\] +\]" src="form_2475.png"/>

      -

      it holds that the Kelvin-condensed equivalents of the previously defined symmetric tensors, indicated by the $\tilde{\left(\bullet\right)}$, must be defined as

      -\[
+<p> it holds that the Kelvin-condensed equivalents of the previously defined symmetric tensors, indicated by the <picture><source srcset=$\tilde{\left(\bullet\right)}$, must be defined as

      +\[
 \tilde{\mathbf{S}}
   = \left[ \begin{array}{c}
   S_{00} \\ S_{11} \\ S_{22} \\ \sqrt{2} S_{12} \\ \sqrt{2} S_{02} \\
@@ -274,10 +274,10 @@
   = \left[ \begin{array}{c}
   E_{00} \\ E_{11} \\ E_{22} \\ \sqrt{2} E_{12} \\ \sqrt{2} E_{02} \\
 \sqrt{2} E_{01} \end{array}\right] .
-\] +\]" src="form_2477.png"/>

      The corresponding and consistent condensed fourth-order symmetric tensor is

      -\[
+<picture><source srcset=\[
 \tilde{\cal{C}}
   = \left[ \begin{array}{cccccc}
   \tilde{\cal{C}}_{00} & \tilde{\cal{C}}_{01} & \tilde{\cal{C}}_{02} &
@@ -312,10 +312,10 @@
 {\cal{C}}_{0201}        \\ \sqrt{2} {\cal{C}}_{0100}  & \sqrt{2}
 {\cal{C}}_{0111} & \sqrt{2} {\cal{C}}_{0122}  & 2 {\cal{C}}_{0112} & 2
 {\cal{C}}_{0102}         & 2 {\cal{C}}_{0101} \end{array}\right] .
-\] +\]" src="form_2478.png"/>

      -

      The mapping from the two Kelvin indices of the FullMatrix $\tilde{\cal{C}}$ to the rank-4 SymmetricTensor $\cal{C}$ can be inferred using the table shown above.

      -

      An important observation is that both the left-hand side tensor $\tilde{\mathbf{S}}$ and right-hand side tensor $\tilde{\mathbf{E}}$ have the same form; this is a property that is not present in Voigt notation. The various factors introduced into $\tilde{\mathbf{S}}$, $\tilde{\mathbf{E}}$ and $\tilde{\cal{C}}$ account for the symmetry of the tensors. The Kelvin description of their non-symmetric counterparts include no such factors.

      +

      The mapping from the two Kelvin indices of the FullMatrix $\tilde{\cal{C}}$ to the rank-4 SymmetricTensor $\cal{C}$ can be inferred using the table shown above.

      +

      An important observation is that both the left-hand side tensor $\tilde{\mathbf{S}}$ and right-hand side tensor $\tilde{\mathbf{E}}$ have the same form; this is a property that is not present in Voigt notation. The various factors introduced into $\tilde{\mathbf{S}}$, $\tilde{\mathbf{E}}$ and $\tilde{\cal{C}}$ account for the symmetry of the tensors. The Kelvin description of their non-symmetric counterparts include no such factors.

      Some useful references that show how this notation works include, amongst others,

      @article{Nagel2016,
      author = {Nagel, T. and G{\"o}rke, U-J. and Moerman, K. and Kolditz,
      O.},
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html 2024-03-17 21:57:42.795230296 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html 2024-03-17 21:57:42.803230346 +0000 @@ -123,7 +123,7 @@ &#href_anchor"details" id="details">

      Detailed Description

      A collection of operations to assist in the transformation of tensor quantities from the reference to spatial configuration, and vice versa. These types of transformation are typically used to re-express quantities measured or computed in one configuration in terms of a second configuration.

      Notation

      -

      We will use the same notation for the coordinates $\mathbf{X}, \mathbf{x}$, transformations $\varphi$, differential operator $\nabla_{0}$ and deformation gradient $\mathbf{F}$ as discussed for namespace Physics::Elasticity.

      +

      We will use the same notation for the coordinates $\mathbf{X}, \mathbf{x}$, transformations $\varphi$, differential operator $\nabla_{0}$ and deformation gradient $\mathbf{F}$ as discussed for namespace Physics::Elasticity.

      As a further point on notation, we will follow Holzapfel (2007) and denote the push forward transformation as $\chi\left(\bullet\right)$ and the pull back transformation as $\chi^{-1}\left(\bullet\right)$. We will also use the annotation $\left(\bullet\right)^{\sharp}$ to indicate that a tensor $\left(\bullet\right)$ is a contravariant tensor, and $\left(\bullet\right)^{\flat}$ that it is covariant. In other words, these indices do not actually change the tensor, they just indicate the kind of object a particular tensor is.

      Note
      For these transformations, unless otherwise stated, we will strictly assume that all indices of the transformed tensors derive from one coordinate system; that is to say that they are not multi-point tensors (such as the Piola stress in elasticity).

      Function Documentation

      @@ -186,18 +186,18 @@ const Tensor< 2, dim, Number > & B&#href_anchor"memdoc">

      Return a vector with a changed basis, i.e.

      -\[
+<picture><source srcset=\[
  \mathbf{V}^{\prime} \dealcoloneq \mathbf{B} \cdot \mathbf{V}
-\] +\]" src="form_2506.png"/>

      Parameters
      - - + +
      [in]VThe vector to be transformed $\mathbf{V}$
      [in]BThe transformation matrix $\mathbf{B}$
      [in]VThe vector to be transformed $\mathbf{V}$
      [in]BThe transformation matrix $\mathbf{B}$
      -
      Returns
      $\mathbf{V}^{\prime}$
      +
      Returns
      $\mathbf{V}^{\prime}$
      @@ -219,19 +219,19 @@ const Tensor< 2, dim, Number > & B&#href_anchor"memdoc">

      Return a rank-2 tensor with a changed basis, i.e.

      -\[
+<picture><source srcset=\[
  \mathbf{T}^{\prime} \dealcoloneq \mathbf{B} \cdot \mathbf{T} \cdot
 \mathbf{B}^{T}
-\] +\]" src="form_2508.png"/>

      Parameters
      - - + +
      [in]TThe tensor to be transformed $\mathbf{T}$
      [in]BThe transformation matrix $\mathbf{B}$
      [in]TThe tensor to be transformed $\mathbf{T}$
      [in]BThe transformation matrix $\mathbf{B}$
      -
      Returns
      $\mathbf{T}^{\prime}$
      +
      Returns
      $\mathbf{T}^{\prime}$
      @@ -253,19 +253,19 @@ const Tensor< 2, dim, Number > & B&#href_anchor"memdoc">

      Return a symmetric rank-2 tensor with a changed basis, i.e.

      -\[
+<picture><source srcset=\[
  \mathbf{T}^{\prime} \dealcoloneq \mathbf{B} \cdot \mathbf{T} \cdot
 \mathbf{B}^{T}
-\] +\]" src="form_2508.png"/>

      Parameters
      - - + +
      [in]TThe tensor to be transformed $\mathbf{T}$
      [in]BThe transformation matrix $\mathbf{B}$
      [in]TThe tensor to be transformed $\mathbf{T}$
      [in]BThe transformation matrix $\mathbf{B}$
      -
      Returns
      $\mathbf{T}^{\prime}$
      +
      Returns
      $\mathbf{T}^{\prime}$
      @@ -287,18 +287,18 @@ const Tensor< 2, dim, Number > & B&#href_anchor"memdoc">

      Return a rank-4 tensor with a changed basis, i.e. (in index notation):

      -\[
+<picture><source srcset=\[
  H_{ijkl}^{\prime} \dealcoloneq B_{iI} B_{jJ} H_{IJKL} B_{kK} B_{lL}
-\] +\]" src="form_2510.png"/>

      Parameters
      - - + +
      [in]HThe tensor to be transformed $\mathbf{T}$
      [in]BThe transformation matrix $\mathbf{B}$
      [in]HThe tensor to be transformed $\mathbf{T}$
      [in]BThe transformation matrix $\mathbf{B}$
      -
      Returns
      $\mathbf{H}^{\prime}$
      +
      Returns
      $\mathbf{H}^{\prime}$
      @@ -320,18 +320,18 @@ const Tensor< 2, dim, Number > & B&#href_anchor"memdoc">

      Return a symmetric rank-4 tensor with a changed basis, i.e. (in index notation):

      -\[
+<picture><source srcset=\[
  H_{ijkl}^{\prime} \dealcoloneq B_{iI} B_{jJ} H_{IJKL} B_{kK} B_{lL}
-\] +\]" src="form_2510.png"/>

      Parameters
      - - + +
      [in]HThe tensor to be transformed $\mathbf{T}$
      [in]BThe transformation matrix $\mathbf{B}$
      [in]HThe tensor to be transformed $\mathbf{T}$
      [in]BThe transformation matrix $\mathbf{B}$
      -
      Returns
      $\mathbf{H}^{\prime}$
      +
      Returns
      $\mathbf{H}^{\prime}$
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 2024-03-17 21:57:42.827230494 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 2024-03-17 21:57:42.835230543 +0000 @@ -149,10 +149,10 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a contravariant vector, i.e.

      -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F} \cdot \left(\bullet\right)^{\sharp}
-\] +\]" src="form_2517.png"/>

      Parameters
      @@ -162,7 +162,7 @@
      -
      Returns
      $\chi\left( \mathbf{V} \right)$
      +
      Returns
      $\chi\left( \mathbf{V} \right)$
      @@ -184,11 +184,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a rank-2 contravariant tensor, i.e.

      -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F} \cdot \left(\bullet\right)^{\sharp} \cdot
 \mathbf{F}^{T}
-\] +\]" src="form_2519.png"/>

      Parameters
      @@ -198,7 +198,7 @@
      -
      Returns
      $\chi\left( \mathbf{T} \right)$
      +
      Returns
      $\chi\left( \mathbf{T} \right)$
      @@ -220,11 +220,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a rank-2 contravariant symmetric tensor, i.e.

      -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F} \cdot \left(\bullet\right)^{\sharp} \cdot
 \mathbf{F}^{T}
-\] +\]" src="form_2519.png"/>

      Parameters
      @@ -234,7 +234,7 @@
      -
      Returns
      $\chi\left( \mathbf{T} \right)$
      +
      Returns
      $\chi\left( \mathbf{T} \right)$
      @@ -256,11 +256,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a rank-4 contravariant tensor, i.e. (in index notation):

      -\[
+<picture><source srcset=\[
  \left[ \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq F_{iI} F_{jJ}
    \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
-\] +\]" src="form_2521.png"/>

      Parameters
      @@ -270,7 +270,7 @@
      -
      Returns
      $\chi\left( \mathbf{H} \right)$
      +
      Returns
      $\chi\left( \mathbf{H} \right)$
      @@ -292,11 +292,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a rank-4 contravariant symmetric tensor, i.e. (in index notation):

      -\[
+<picture><source srcset=\[
  \left[ \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq F_{iI} F_{jJ}
    \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
-\] +\]" src="form_2521.png"/>

      Parameters
      @@ -306,7 +306,7 @@
      -
      Returns
      $\chi\left( \mathbf{H} \right)$
      +
      Returns
      $\chi\left( \mathbf{H} \right)$
      @@ -328,10 +328,10 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the pull back transformation on a contravariant vector, i.e.

      -\[
+<picture><source srcset=\[
  \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F}^{-1} \cdot \left(\bullet\right)^{\sharp}
-\] +\]" src="form_2523.png"/>

      Parameters
      @@ -341,7 +341,7 @@
      -
      Returns
      $\chi^{-1}\left( \mathbf{v} \right)$
      +
      Returns
      $\chi^{-1}\left( \mathbf{v} \right)$
      @@ -363,11 +363,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the pull back transformation on a rank-2 contravariant tensor, i.e.

      -\[
+<picture><source srcset=\[
  \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F}^{-1} \cdot \left(\bullet\right)^{\sharp}
    \cdot \mathbf{F}^{-T}
-\] +\]" src="form_2525.png"/>

      Parameters
      @@ -377,7 +377,7 @@
      -
      Returns
      $\chi^{-1}\left( \mathbf{t} \right)$
      +
      Returns
      $\chi^{-1}\left( \mathbf{t} \right)$
      @@ -399,11 +399,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the pull back transformation on a rank-2 contravariant symmetric tensor, i.e.

      -\[
+<picture><source srcset=\[
  \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F}^{-1} \cdot \left(\bullet\right)^{\sharp}
    \cdot \mathbf{F}^{-T}
-\] +\]" src="form_2525.png"/>

      Parameters
      @@ -413,7 +413,7 @@
      -
      Returns
      $\chi^{-1}\left( \mathbf{t} \right)$
      +
      Returns
      $\chi^{-1}\left( \mathbf{t} \right)$
      @@ -435,11 +435,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the pull back transformation on a rank-4 contravariant tensor, i.e. (in index notation):

      -\[
+<picture><source srcset=\[
  \left[ \chi^{-1}\left(\bullet\right)^{\sharp} \right]_{IJKL}
    \dealcoloneq F^{-1}_{Ii} F^{-1}_{Jj}
 \left(\bullet\right)^{\sharp}_{ijkl} F^{-1}_{Kk} F^{-1}_{Ll}
-\] +\]" src="form_2527.png"/>

      Parameters
      @@ -449,7 +449,7 @@
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 2024-03-17 21:57:42.859230692 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 2024-03-17 21:57:42.867230741 +0000 @@ -149,10 +149,10 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a covariant vector, i.e.

      -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
-\] +\]" src="form_2530.png"/>

      Parameters
      @@ -162,7 +162,7 @@
      -
      Returns
      $\chi\left( \mathbf{V} \right)$
      +
      Returns
      $\chi\left( \mathbf{V} \right)$
      @@ -184,11 +184,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a rank-2 covariant tensor, i.e.

      -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
    \cdot \mathbf{F}^{-1}
-\] +\]" src="form_2531.png"/>

      Parameters
      @@ -198,7 +198,7 @@
      -
      Returns
      $\chi\left( \mathbf{T} \right)$
      +
      Returns
      $\chi\left( \mathbf{T} \right)$
      @@ -220,11 +220,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a rank-2 covariant symmetric tensor, i.e.

      -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
    \cdot \mathbf{F}^{-1}
-\] +\]" src="form_2531.png"/>

      Parameters
      @@ -234,7 +234,7 @@
      -
      Returns
      $\chi\left( \mathbf{T} \right)$
      +
      Returns
      $\chi\left( \mathbf{T} \right)$
      @@ -256,11 +256,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a rank-4 covariant tensor, i.e. (in index notation):

      -\[
+<picture><source srcset=\[
  \left[ \chi\left(\bullet\right)^{\flat} \right]_{ijkl}
    \dealcoloneq F^{-T}_{iI} F^{-T}_{jJ}
    \left(\bullet\right)^{\flat}_{IJKL} F^{-T}_{kK} F^{-T}_{lL}
-\] +\]" src="form_2532.png"/>

      Parameters
      @@ -270,7 +270,7 @@
      -
      Returns
      $\chi\left( \mathbf{H} \right)$
      +
      Returns
      $\chi\left( \mathbf{H} \right)$
      @@ -292,11 +292,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a rank-4 covariant symmetric tensor, i.e. (in index notation):

      -\[
+<picture><source srcset=\[
  \left[ \chi\left(\bullet\right)^{\flat} \right]_{ijkl}
    \dealcoloneq F^{-T}_{iI} F^{-T}_{jJ}
    \left(\bullet\right)^{\flat}_{IJKL} F^{-T}_{kK} F^{-T}_{lL}
-\] +\]" src="form_2532.png"/>

      Parameters
      @@ -306,7 +306,7 @@
      -
      Returns
      $\chi\left( \mathbf{H} \right)$
      +
      Returns
      $\chi\left( \mathbf{H} \right)$
      @@ -328,10 +328,10 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the pull back transformation on a covariant vector, i.e.

      -\[
+<picture><source srcset=\[
  \chi^{-1}\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat}
-\] +\]" src="form_2533.png"/>

      Parameters
      @@ -341,7 +341,7 @@
      -
      Returns
      $\chi^{-1}\left( \mathbf{v} \right)$
      +
      Returns
      $\chi^{-1}\left( \mathbf{v} \right)$
      @@ -363,11 +363,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the pull back transformation on a rank-2 covariant tensor, i.e.

      -\[
+<picture><source srcset=\[
  \chi^{-1}\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat} \cdot
 \mathbf{F}
-\] +\]" src="form_2534.png"/>

      Parameters
      @@ -377,7 +377,7 @@
      -
      Returns
      $\chi^{-1}\left( \mathbf{t} \right)$
      +
      Returns
      $\chi^{-1}\left( \mathbf{t} \right)$
      @@ -399,11 +399,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the pull back transformation on a rank-2 covariant symmetric tensor, i.e.

      -\[
+<picture><source srcset=\[
  \chi^{-1}\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat}
    \cdot \mathbf{F}
-\] +\]" src="form_2535.png"/>

      Parameters
      @@ -413,7 +413,7 @@
      -
      Returns
      $\chi^{-1}\left( \mathbf{t} \right)$
      +
      Returns
      $\chi^{-1}\left( \mathbf{t} \right)$
      @@ -435,11 +435,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the pull back transformation on a rank-4 contravariant tensor, i.e. (in index notation):

      -\[
+<picture><source srcset=\[
  \left[ \chi^{-1}\left(\bullet\right)^{\flat} \right]_{IJKL}
  \dealcoloneq F^{T}_{Ii} F^{T}_{Jj}
  \left(\bullet\right)^{\flat}_{ijkl} F^{T}_{Kk} F^{T}_{Ll}
-\] +\]" src="form_2536.png"/>

      Parameters
      @@ -449,7 +449,7 @@
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 2024-03-17 21:57:42.895230914 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 2024-03-17 21:57:42.899230939 +0000 @@ -140,11 +140,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a contravariant vector, i.e.

      -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
  \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
  \left(\bullet\right)^{\sharp}
-\] +\]" src="form_2537.png"/>

      Parameters
      @@ -154,8 +154,8 @@
      -
      Returns
      $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{V} \right)$
      +
      Returns
      $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{V} \right)$
      @@ -177,11 +177,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a rank-2 contravariant tensor, i.e.

      -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{T}
-\] +\]" src="form_2539.png"/>

      Parameters
      @@ -191,8 +191,8 @@
      -
      Returns
      $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{T} \right)$
      +
      Returns
      $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{T} \right)$
      @@ -214,11 +214,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a rank-2 contravariant symmetric tensor, i.e.

      -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{T}
-\] +\]" src="form_2539.png"/>

      Parameters
      @@ -228,8 +228,8 @@
      -
      Returns
      $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{T} \right)$
      +
      Returns
      $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{T} \right)$
      @@ -251,12 +251,12 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a rank-4 contravariant tensor, i.e. (in index notation):

      -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \left[
 \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; F_{iI} F_{jJ}
 \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
-\] +\]" src="form_2541.png"/>

      Parameters
      @@ -266,8 +266,8 @@
      -
      Returns
      $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{H} \right)$
      +
      Returns
      $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{H} \right)$
      @@ -289,12 +289,12 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the push forward transformation on a rank-4 contravariant symmetric tensor, i.e. (in index notation):

      -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \left[
 \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; F_{iI} F_{jJ}
 \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
-\] +\]" src="form_2541.png"/>

      Parameters
      @@ -304,8 +304,8 @@
      -
      Returns
      $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{H} \right)$
      +
      Returns
      $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{H} \right)$
      @@ -327,11 +327,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the pull back transformation on a contravariant vector, i.e.

      -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
 \left(\bullet\right)^{\sharp}
-\] +\]" src="form_2543.png"/>

      Parameters
      @@ -341,8 +341,8 @@
      -
      Returns
      $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{v}
-\right)$
      +
      Returns
      $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{v}
+\right)$
      @@ -364,11 +364,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the pull back transformation on a rank-2 contravariant tensor, i.e.

      -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{-T}
-\] +\]" src="form_2545.png"/>

      Parameters
      @@ -378,8 +378,8 @@
      -
      Returns
      $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{t}
-\right)$
      +
      Returns
      $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{t}
+\right)$
      @@ -401,11 +401,11 @@ const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

      Return the result of the pull back transformation on a rank-2 contravariant symmetric tensor, i.e.

      -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{-T}
-\] +\]" src="form_2545.png"/>

      Parameters
      @@ -415,8 +415,8 @@
      -
      Returns
      $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{t}
-\right)$
      +
      Returns
      $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{t}
/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Rotations.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Rotations.html	2024-03-17 21:57:42.923231087 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Rotations.html	2024-03-17 21:57:42.927231112 +0000
@@ -125,14 +125,14 @@
       </table>
 </div><div class=

      Return the rotation matrix for 2-d Euclidean space, namely

      -\[
+<picture><source srcset=\[
  \mathbf{R} \dealcoloneq \left[ \begin{array}{cc}
  cos(\theta) & -sin(\theta) \\
  sin(\theta) & cos(\theta)
 \end{array}\right]
-\] +\]" src="form_2512.png"/>

      -

      where $\theta$ is the rotation angle given in radians. In particular, this describes the counter-clockwise rotation of a vector relative to a fixed set of right-handed axes.

      +

      where $\theta$ is the rotation angle given in radians. In particular, this describes the counter-clockwise rotation of a vector relative to a fixed set of right-handed axes.

      Parameters
      @@ -160,12 +160,12 @@
      [in]angleThe rotation angle (about the z-axis) in radians
      const Number & angle&#href_anchor"memdoc">

      Return the rotation matrix for 3-d Euclidean space. Most concisely stated using the Rodrigues' rotation formula, this function returns the equivalent of

      -\[
+<picture><source srcset=\[
  \mathbf{R} \dealcoloneq cos(\theta)\mathbf{I} + sin(\theta)\mathbf{W}
              + (1-cos(\theta))\mathbf{u}\otimes\mathbf{u}
-\] +\]" src="form_2514.png"/>

      -

      where $\mathbf{u}$ is the axial vector (an axial vector) and $\theta$ is the rotation angle given in radians, $\mathbf{I}$ is the identity tensor and $\mathbf{W}$ is the skew symmetric tensor of $\mathbf{u}$.

      +

      where $\mathbf{u}$ is the axial vector (an axial vector) and $\theta$ is the rotation angle given in radians, $\mathbf{I}$ is the identity tensor and $\mathbf{W}$ is the skew symmetric tensor of $\mathbf{u}$.

      Note
      For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (9.194) on p. 374 (or thereabouts). This presents Rodrigues' rotation formula, but the implementation used in this function is described in this wikipedia link. In particular, this describes the counter-clockwise rotation of a vector in a plane with its normal. defined by the axis of rotation. An alternative implementation is discussed at this link, but is inconsistent (sign-wise) with the Rodrigues' rotation formula as it describes the rotation of a coordinate system.
      Parameters
      @@ -197,12 +197,12 @@

      Return the rotation matrix for 3-d Euclidean space. Most concisely stated using the Rodrigues' rotation formula, this function returns the equivalent of

      -\[
+<picture><source srcset=\[
  \mathbf{R} \dealcoloneq cos(\theta)\mathbf{I} + sin(\theta)\mathbf{W}
              + (1-cos(\theta))\mathbf{u}\otimes\mathbf{u}
-\] +\]" src="form_2514.png"/>

      -

      where $\mathbf{u}$ is the axial vector (an axial vector) and $\theta$ is the rotation angle given in radians, $\mathbf{I}$ is the identity tensor and $\mathbf{W}$ is the skew symmetric tensor of $\mathbf{u}$.

      +

      where $\mathbf{u}$ is the axial vector (an axial vector) and $\theta$ is the rotation angle given in radians, $\mathbf{I}$ is the identity tensor and $\mathbf{W}$ is the skew symmetric tensor of $\mathbf{u}$.

      Note
      For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (9.194) on p. 374 (or thereabouts). This presents Rodrigues' rotation formula, but the implementation used in this function is described in this wikipedia link. In particular, this describes the counter-clockwise rotation of a vector in a plane with its normal. defined by the axis of rotation. An alternative implementation is discussed at this link, but is inconsistent (sign-wise) with the Rodrigues' rotation formula as it describes the rotation of a coordinate system.
      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html 2024-03-17 21:57:42.947231235 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html 2024-03-17 21:57:42.947231235 +0000 @@ -123,11 +123,11 @@
      const Tensor< 1, spacedim, Number > & b&#href_anchor"memdoc"> -

      Calculate the angle $\theta$ between two vectors a and b. The returned angle will be in the range $[0, \pi]$.

      +

      Calculate the angle $\theta$ between two vectors a and b. The returned angle will be in the range $[0, \pi]$.

      This function uses the geometric definition of the scalar product.

      -\[
+<picture><source srcset=\[
   \vec{a} \cdot \vec{b} = \|\vec{a}\| \|\vec{b}\| \cos(\theta)
-\] +\]" src="form_2550.png"/>

      @@ -154,21 +154,21 @@
      const Tensor< 1, spacedim, Number > & axis&#href_anchor"memdoc"> -

      Calculate the angle $\theta$ between two vectors a and b, where both vectors are located in a plane described by a normal vector axis.

      -

      The angle computed by this function corresponds to the rotation angle that would transform the vector a into the vector b around the vector axis. Thus, contrary to the function above, we get a signed angle which will be in the range $[-\pi, \pi]$.

      +

      Calculate the angle $\theta$ between two vectors a and b, where both vectors are located in a plane described by a normal vector axis.

      +

      The angle computed by this function corresponds to the rotation angle that would transform the vector a into the vector b around the vector axis. Thus, contrary to the function above, we get a signed angle which will be in the range $[-\pi, \pi]$.

      The vector axis needs to be a unit vector and be perpendicular to both vectors a and b.

      This function uses the geometric definitions of both the scalar and cross product.

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   \vec{a} \cdot  \vec{b} &= \|\vec{a}\| \|\vec{b}\| \cos(\theta) \\
   \vec{a} \times \vec{b} &= \|\vec{a}\| \|\vec{b}\| \sin(\theta) \vec{n}
-\end{align*} +\end{align*}" src="form_2552.png"/>

      We can create the tangent of the angle using both products.

      -\[
+<picture><source srcset=\[
   \tan{\theta}
   = \frac{\sin(\theta)}{\cos(theta)}
   = \frac{(\vec{a} \times \vec{b}) \cdot \vec{n}}{\vec{a} \cdot \vec{b}}
-\] +\]" src="form_2553.png"/>

      Note
      Only applicable for three-dimensional vectors spacedim == 3.
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html 2024-03-17 21:57:42.975231408 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html 2024-03-17 21:57:42.983231458 +0000 @@ -110,13 +110,13 @@
      &#href_anchor"memitem:">class  TransformationSpectrumFolding
      &#href_anchor"details" id="details">

      Detailed Description

      Base namespace for solver classes using the SLEPc solvers which are selected based on flags passed to the eigenvalue problem solver context. Derived classes set the right flags to set the right solver.

      -

      The SLEPc solvers are intended to be used for solving the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively. The emphasis is on methods and techniques appropriate for problems in which the associated matrices are sparse. Most of the methods offered by the SLEPc library are projection methods or other methods with similar properties; and wrappers are provided to interface to SLEPc solvers that handle both of these problem sets.

      +

      The SLEPc solvers are intended to be used for solving the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively. The emphasis is on methods and techniques appropriate for problems in which the associated matrices are sparse. Most of the methods offered by the SLEPc library are projection methods or other methods with similar properties; and wrappers are provided to interface to SLEPc solvers that handle both of these problem sets.

      SLEPcWrappers can be implemented in application codes in the following way:

      SolverControl solver_control (1000, 1e-9);
      SolverArnoldi system (solver_control, mpi_communicator);
      system.solve (A, B, lambda, x, size_of_spectrum);
      -

      for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable const unsigned int size_of_spectrum tells SLEPc the number of eigenvector/eigenvalue pairs to solve for. Additional options and solver parameters can be passed to the SLEPc solvers before calling solve(). For example, if the matrices of the general eigenspectrum problem are not hermitian and the lower eigenvalues are wanted only, the following code can be implemented before calling solve():

      system.set_problem_type (EPS_NHEP);
      +

      for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable const unsigned int size_of_spectrum tells SLEPc the number of eigenvector/eigenvalue pairs to solve for. Additional options and solver parameters can be passed to the SLEPc solvers before calling solve(). For example, if the matrices of the general eigenspectrum problem are not hermitian and the lower eigenvalues are wanted only, the following code can be implemented before calling solve():

      system.set_problem_type (EPS_NHEP);
      system.set_which_eigenpairs (EPS_SMALLEST_REAL);

      These options can also be set at the command line.

      See also step-36 for a hands-on example.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html 2024-03-17 21:57:43.003231581 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html 2024-03-17 21:57:43.007231606 +0000 @@ -142,7 +142,7 @@
      const VectorType & b,
      double tol)>

      Type of function objects to interface with SUNDIALS' linear solvers

      -

      This function type encapsulates the action of solving $P^{-1}Ax=P^{-1}b$. The LinearOperator op encapsulates the matrix vector product $Ax$ and the LinearOperator prec encapsulates the application of the preconditioner $P^{-1}z$. The user can specify function objects of this type to attach custom linear solver routines to SUNDIALS. The two LinearOperators op and prec are built internally by SUNDIALS based on user settings. The parameters are interpreted as follows:

      +

      This function type encapsulates the action of solving $P^{-1}Ax=P^{-1}b$. The LinearOperator op encapsulates the matrix vector product $Ax$ and the LinearOperator prec encapsulates the application of the preconditioner $P^{-1}z$. The user can specify function objects of this type to attach custom linear solver routines to SUNDIALS. The two LinearOperators op and prec are built internally by SUNDIALS based on user settings. The parameters are interpreted as follows:

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 2024-03-17 21:57:43.031231754 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 2024-03-17 21:57:43.035231779 +0000 @@ -109,19 +109,19 @@
      [in]opA LinearOperator that applies the matrix vector product

      Detailed Description

      Smoothness estimation strategy based on the decay of Fourier expansion coefficients.

      -

      From the definition, we can write our Fourier series expansion $a_{\bf k}$ of the finite element solution on cell $K$ with polynomial degree $p$ as a matrix product

      -\begin{eqnarray*}
+<p>From the definition, we can write our <a class=Fourier series expansion $a_{\bf k}$ of the finite element solution on cell $K$ with polynomial degree $p$ as a matrix product

      +\begin{eqnarray*}
    u_h({\bf x}) &=& \sum_j u_j \varphi_j ({\bf x}) \\
    u_{h, {\bf k}}({\bf x}) &=&
      \sum_{{\bf k}, \|{\bf k}\|\le p} a_{\bf k} \phi_{\bf k}({\bf x}),
      \quad a_{\bf k} = \sum_j {\cal F}_{{\bf k},j} u_j
-\end{eqnarray*} +\end{eqnarray*}" src="form_2228.png"/>

      -

      with $u_j$ the degrees of freedom and $\varphi_j$ the corresponding shape functions. $\{\phi_{\bf k}({\bf x}) = \exp(i \, 2 \pi \, {\bf k} \cdot
-{\bf x}) \}$ are exponential functions on cell $K$. $a_{\bf k}$ and ${\cal
-F}_{{\bf k},j}$ are coefficients and transformation matrices from the Fourier expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if mapping from the reference cell to the actual cell is linear. We use the class FESeries::Fourier to determine all coefficients $a_{\bf k}$.

      -

      If the finite element approximation on cell $K$ is part of the Hilbert space $H^s(K)$, then the following integral must exist for both the finite element and spectral representation of our solution

      -\begin{eqnarray*}
+<p> with <picture><source srcset=$u_j$ the degrees of freedom and $\varphi_j$ the corresponding shape functions. $\{\phi_{\bf k}({\bf x}) = \exp(i \, 2 \pi \, {\bf k} \cdot
+{\bf x}) \}$ are exponential functions on cell $K$. $a_{\bf k}$ and ${\cal
+F}_{{\bf k},j}$ are coefficients and transformation matrices from the Fourier expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if mapping from the reference cell to the actual cell is linear. We use the class FESeries::Fourier to determine all coefficients $a_{\bf k}$.

      +

      If the finite element approximation on cell $K$ is part of the Hilbert space $H^s(K)$, then the following integral must exist for both the finite element and spectral representation of our solution

      +\begin{eqnarray*}
   \| \nabla^s u_h({\bf x}) \|_{L^2(K)}^2 &=&
     \int\limits_K \left| \nabla^s u_h({\bf x}) \right|^2 d{\bf x} <
     \infty \\
@@ -130,40 +130,40 @@
     a_{\bf k} \, \phi_{\bf k}({\bf x}) \right|^2 d{\bf x} =
     (2 \pi)^{2s} \sum\limits_{\bf k} \left| a_{\bf k} \right|^2
     \|{\bf k}\|_2^{2s} < \infty
-\end{eqnarray*} +\end{eqnarray*}" src="form_2232.png"/>

      The sum is finite only if the summands decay at least with order

      -\[
+<picture><source srcset=\[
   |a_{\bf k}|^2 \|{\bf k}\|_2^{2s} \|{\bf k}\|_2^{d - 1} =
     {\cal O}\left( \|{\bf k}\|_2^{-1-\epsilon} \right)
-\] +\]" src="form_2233.png"/>

      -

      for all $\epsilon > 0$. The additional factor stems from the fact that, since we sum over all multi-indices ${\bf k}$ that are located on a dim-dimensional sphere, we actually have, up to a constant, $\|{\bf k}\|_2^{d-1}$ modes located in each increment $\|{\bf k}\|_2 +
-d\|{\bf k}\|_2$ that need to be taken into account. By a comparison of exponents, we can rewrite this condition as

      -\[
+<p> for all <picture><source srcset=$\epsilon > 0$. The additional factor stems from the fact that, since we sum over all multi-indices ${\bf k}$ that are located on a dim-dimensional sphere, we actually have, up to a constant, $\|{\bf k}\|_2^{d-1}$ modes located in each increment $\|{\bf k}\|_2 +
+d\|{\bf k}\|_2$ that need to be taken into account. By a comparison of exponents, we can rewrite this condition as

      +\[
   |a_{\bf k}| = {\cal O}\left(\|{\bf k}\|_2^
     {-\left(s + \frac d2 + \epsilon \right)} \right)
-\] +\]" src="form_2238.png"/>

      -

      The next step is to estimate how fast these coefficients decay with $\|{\bf k}\|_2$. Thus, we perform a least-squares fit

      -\[
+<p>The next step is to estimate how fast these coefficients decay with <picture><source srcset=$\|{\bf k}\|_2$. Thus, we perform a least-squares fit

      +\[
    \min_{\alpha,\sigma}
    \frac 12 \sum_{{\bf k}, \|{\bf k}\|_2 \le p}
    \left( |a_{\bf k}| - \alpha \|{\bf k}\|_2^{-\sigma}\right)^2
-\] +\]" src="form_2240.png"/>

      -

      with regression coefficients $\alpha$ and $\sigma$. For simplification, we apply a logarithm on our minimization problem

      -\[
+<p> with regression coefficients <picture><source srcset=$\alpha$ and $\sigma$. For simplification, we apply a logarithm on our minimization problem

      +\[
    \min_{\beta,\sigma}
    Q(\beta,\sigma) =
    \frac 12 \sum_{{\bf k}, \|{\bf k}\|_2 \le p}
    \left( \ln |a_{\bf k}| - \beta + \sigma \ln \|{\bf k}\|_2
 \right)^2,
-\] +\]" src="form_2241.png"/>

      -

      where $\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0,
-\frac{\partial Q}{\partial\sigma}=0$, are linear in $\beta,\sigma$. We can write these conditions as follows:

      -\[
+<p> where <picture><source srcset=$\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0,
+\frac{\partial Q}{\partial\sigma}=0$, are linear in $\beta,\sigma$. We can write these conditions as follows:

      +\[
    \left(\begin{array}{cc}
    \sum_{{\bf k}, \|{\bf k}\|_2 \le p} 1 &
    \sum_{{\bf k}, \|{\bf k}\|_2 \le p} \ln \|{\bf k}\|_2
@@ -180,10 +180,10 @@
    \\
    \sum_{{\bf k}, \|{\bf k}\|_2\le p} \ln |a_{{\bf k}}| \ln \|{\bf
 k}\|_2 \end{array}\right)
-\] +\]" src="form_2245.png"/>

      -

      Solving for $\beta$ and $\sigma$ is just a linear regression fit and to do that we will use FESeries::linear_regression().

      -

      While we are not particularly interested in the actual value of $\beta$, the formula above gives us a means to calculate the value of the exponent $\sigma$ that we can then use to determine that $u(\hat{\bf x})$ is in $H^s(K)$ with $s=\sigma-\frac d2$. The decay rates $\sigma$ will suffice as our smoothness indicators and will be calculated on each cell for any provided solution.

      +

      Solving for $\beta$ and $\sigma$ is just a linear regression fit and to do that we will use FESeries::linear_regression().

      +

      While we are not particularly interested in the actual value of $\beta$, the formula above gives us a means to calculate the value of the exponent $\sigma$ that we can then use to determine that $u(\hat{\bf x})$ is in $H^s(K)$ with $s=\sigma-\frac d2$. The decay rates $\sigma$ will suffice as our smoothness indicators and will be calculated on each cell for any provided solution.

      Note
      An extensive demonstration of the use of these functions is provided in step-27.

      Function Documentation

      @@ -228,17 +228,17 @@
      const bool only_flagged_cells = false&#href_anchor"memdoc"> -

      In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors $\bf k$ describing Fourier polynomials $P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|\bf k\|_2$, we take the maximum among those. Thus, the least-squares fit is performed on

      -\[
+<p>In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors <picture><source srcset=$\bf k$ describing Fourier polynomials $P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|\bf k\|_2$, we take the maximum among those. Thus, the least-squares fit is performed on

      +\[
   \ln \left( \max\limits_{\|{\bf k}\|_2} |a_{\bf k}| \right) \sim
     C - \sigma \ln \|{\bf k}\|_2
-\] +\]" src="form_2259.png"/>

      -

      for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element. We exclude the $\|{\bf k}\|_2=0$ modes to avoid the singularity of the logarithm.

      -

      The regression_strategy parameter determines which norm will be used on the subset of coefficients $\bf k$ with the same absolute value $\|{\bf k}\|_2$. Default is VectorTools::Linfty_norm for a maximum approximation.

      -

      For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

      +

      for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element. We exclude the $\|{\bf k}\|_2=0$ modes to avoid the singularity of the logarithm.

      +

      The regression_strategy parameter determines which norm will be used on the subset of coefficients $\bf k$ with the same absolute value $\|{\bf k}\|_2$. Default is VectorTools::Linfty_norm for a maximum approximation.

      +

      For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

      A series expansion object fe_fourier has to be supplied, which needs to be constructed with the same FECollection object as the dof_handler.

      -

      The parameter smallest_abs_coefficient allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

      +

      The parameter smallest_abs_coefficient allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

      Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. The parameter only_flagged_cells controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.

      Definition at line 370 of file smoothness_estimator.cc.

      @@ -287,11 +287,11 @@
      const bool only_flagged_cells = false&#href_anchor"memdoc"> -

      In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

      -

      The coefficients_predicate parameter selects Fourier coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Fourier coefficients in each coordinate direction, i.e., set all the elements of the vector to true.

      -

      For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

      +

      In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

      +

      The coefficients_predicate parameter selects Fourier coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Fourier coefficients in each coordinate direction, i.e., set all the elements of the vector to true.

      +

      For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

      A series expansion object fe_fourier has to be supplied, which needs to be constructed with the same FECollection object as the dof_handler.

      -

      The parameter smallest_abs_coefficient allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

      +

      The parameter smallest_abs_coefficient allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

      Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. The parameter only_flagged_cells controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.

      Definition at line 468 of file smoothness_estimator.cc.

      @@ -319,7 +319,7 @@

      Returns a FESeries::Fourier object for Fourier series expansions with the default configuration for smoothness estimation purposes.

      -

      For each finite element of the provided fe_collection, we use as many modes as its polynomial degree plus two. Further for each element, we use a 5-point Gaussian quarature iterated in each dimension by the maximal wave number, which is the number of modes decreased by one since we start with $k = 0$.

      +

      For each finite element of the provided fe_collection, we use as many modes as its polynomial degree plus two. Further for each element, we use a 5-point Gaussian quarature iterated in each dimension by the maximal wave number, which is the number of modes decreased by one since we start with $k = 0$.

      As the Fourier expansion can only be performed on scalar fields, this class does not operate on vector-valued finite elements and will therefore throw an assertion. However, each component of a finite element field can be treated as a scalar field, respectively, on which Fourier expansions are again possible. For this purpose, the optional parameter component defines which component of each FiniteElement will be used. The default value of component only applies to scalar FEs, in which case it indicates that the sole component is to be decomposed. For vector-valued FEs, a non-default value must be explicitly provided.

      Definition at line 577 of file smoothness_estimator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 2024-03-17 21:57:43.067231976 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 2024-03-17 21:57:43.075232026 +0000 @@ -109,25 +109,25 @@

      Detailed Description

      Smoothness estimation strategy based on the decay of Legendre expansion coefficients.

      -

      In one dimension, the finite element solution on cell $K$ with polynomial degree $p$ can be written as

      -\begin{eqnarray*}
+<p>In one dimension, the finite element solution on cell <picture><source srcset=$K$ with polynomial degree $p$ can be written as

      +\begin{eqnarray*}
    u_h(x) &=& \sum_j u_j \varphi_j (x) \\
    u_{h, k}(x) &=& \sum_{k=0}^{p} a_k \widetilde P_k (x),
    \quad a_k = \sum_j {\cal L}_{k,j} u_j
-\end{eqnarray*} +\end{eqnarray*}" src="form_2217.png"/>

      -

      where $u_j$ are degrees of freedom and $\varphi_j$ are the corresponding shape functions. $\{\widetilde P_k(x)\}$ are Legendre polynomials on cell $K$. $a_k$ and ${\cal L}_{k,j}$ are coefficients and transformation matrices from the Legendre expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if the mapping from the reference cell to the actual cell is affine. We use the class FESeries::Legendre to determine all coefficients $a_k$.

      +

      where $u_j$ are degrees of freedom and $\varphi_j$ are the corresponding shape functions. $\{\widetilde P_k(x)\}$ are Legendre polynomials on cell $K$. $a_k$ and ${\cal L}_{k,j}$ are coefficients and transformation matrices from the Legendre expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if the mapping from the reference cell to the actual cell is affine. We use the class FESeries::Legendre to determine all coefficients $a_k$.

      A function is analytic, i.e., representable by a power series, if and only if their Legendre expansion coefficients decay as (see [eibner2007hp])

      -\[
+<picture><source srcset=\[
   |a_k| \sim c \, \exp(-\sigma k)
-\] +\]" src="form_2222.png"/>

      -

      We determine their decay rate $\sigma$ by performing the linear regression fit of

      -\[
+<p> We determine their decay rate <picture><source srcset=$\sigma$ by performing the linear regression fit of

      +\[
   \ln |a_k| \sim C - \sigma k
-\] +\]" src="form_2224.png"/>

      -

      for $k=0,\ldots,p$, with $p$ the polynomial degree of the finite element. The rate of the decay $\sigma$ can be used to estimate the smoothness. For example, one strategy to implement hp-refinement criteria is to perform p-refinement if $\sigma>1$ (see [mavriplis1994hp]).

      +

      for $k=0,\ldots,p$, with $p$ the polynomial degree of the finite element. The rate of the decay $\sigma$ can be used to estimate the smoothness. For example, one strategy to implement hp-refinement criteria is to perform p-refinement if $\sigma>1$ (see [mavriplis1994hp]).

      Function Documentation

      ◆ coefficient_decay()

      @@ -171,24 +171,24 @@ const bool only_flagged_cells = false&#href_anchor"memdoc"> -

      In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors $\bf k$ describing Legendre polynomials $\widetilde P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|{\bf k}\|_1$, we take the maximum among those. Thus, the least-squares fit is performed on

      -\begin{eqnarray*}
+<p>In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors <picture><source srcset=$\bf k$ describing Legendre polynomials $\widetilde P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|{\bf k}\|_1$, we take the maximum among those. Thus, the least-squares fit is performed on

      +\begin{eqnarray*}
   \widetilde P_{\bf k}({\bf x}) &=&
     \widetilde P_{k_1} (x_1) \ldots \widetilde P_{k_d} (x_d) \\
   \ln \left( \max\limits_{\|{\bf k}\|_1} |a_{\bf k}| \right) &\sim&
     C - \sigma \|{\bf k}\|_1
-\end{eqnarray*} +\end{eqnarray*}" src="form_2251.png"/>

      -

      for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element.

      +

      for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element.

      For a finite element approximation solution, this function writes the decay rate for every cell into the output vector smoothness_indicators.

      Parameters
      - + - - + +
      [in]fe_legendreFESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction for every finite element in the collection, where $p$ is its polynomial degree.
      [in]fe_legendreFESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction for every finite element in the collection, where $p$ is its polynomial degree.
      [in]dof_handlerA DoFHandler.
      [in]solutionA solution vector.
      [out]smoothness_indicatorsA vector for smoothness indicators.
      [in]regression_strategyDetermines which norm will be used on the subset of coefficients $\mathbf{k}$ with the same absolute value $\|{\bf k}\|_1$. Default is VectorTools::Linfty_norm for a maximum approximation.
      [in]smallest_abs_coefficientThe smallest absolute value of the coefficient to be used in linear regression. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameters allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients, the returned value for this cell will be $\sigma=\infty$.
      [in]regression_strategyDetermines which norm will be used on the subset of coefficients $\mathbf{k}$ with the same absolute value $\|{\bf k}\|_1$. Default is VectorTools::Linfty_norm for a maximum approximation.
      [in]smallest_abs_coefficientThe smallest absolute value of the coefficient to be used in linear regression. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameters allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients, the returned value for this cell will be $\sigma=\infty$.
      [in]only_flagged_cellsSmoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. This parameter controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.
      @@ -241,16 +241,16 @@ const bool only_flagged_cells = false&#href_anchor"memdoc"> -

      In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

      +

      In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

      For a finite element approximation solution, this function writes the decay rate for every cell into the output vector smoothness_indicators.

      Parameters
      - + - - + +
      [in]fe_legendreFESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction, where $p$ is the maximum polynomial degree to be used.
      [in]fe_legendreFESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction, where $p$ is the maximum polynomial degree to be used.
      [in]dof_handlerA DoFHandler
      [in]solutionA solution vector
      [out]smoothness_indicatorsA vector for smoothness indicators
      [in]coefficients_predicateA predicate to select Legendre coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Legendre coefficients in each coordinate direction, i.e. set all elements of the vector to true.
      [in]smallest_abs_coefficientThe smallest absolute value of the coefficient to be used in linear regression in each coordinate direction. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameters allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.
      [in]coefficients_predicateA predicate to select Legendre coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Legendre coefficients in each coordinate direction, i.e. set all elements of the vector to true.
      [in]smallest_abs_coefficientThe smallest absolute value of the coefficient to be used in linear regression in each coordinate direction. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameters allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.
      [in]only_flagged_cellsSmoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. This parameter controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to NaN.
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 2024-03-17 21:57:43.103232199 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 2024-03-17 21:57:43.103232199 +0000 @@ -139,18 +139,18 @@ SparsityPatternType2 & sparsity_pattern_out&#href_anchor"memdoc">

      Given a sparse matrix (system_matrix, sparsity_pattern), construct a new sparse matrix (system_matrix_out, sparsity_pattern_out) by restriction

      -\[
+<picture><source srcset=\[
  A_i = R_i A R_i^T,
-\] +\]" src="form_1926.png"/>

      -

      where the Boolean matrix $R_i$ is defined by the entries of requested_is.

      -

      The function can be called by multiple processes with different sets of indices, allowing to assign each process a different $A_i$.

      +

      where the Boolean matrix $R_i$ is defined by the entries of requested_is.

      +

      The function can be called by multiple processes with different sets of indices, allowing to assign each process a different $A_i$.

      Such a function is useful to implement Schwarz methods, where operations of type

      -\[
+<picture><source srcset=\[
  u^{n} = u^{n-1} + \sum_{i} R_i^T A_i^{-1} R_i (f - A u^{n-1})
-\] +\]" src="form_1928.png"/>

      -

      are performed to iteratively solve a system of type $Au=f$.

      +

      are performed to iteratively solve a system of type $Au=f$.

      Warning
      This is a collective call that needs to be executed by all processes in the communicator of sparse_matrix_in.
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 2024-03-17 21:57:43.123232322 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 2024-03-17 21:57:43.127232347 +0000 @@ -177,7 +177,7 @@
      Note
      This function returns an internal class object consisting of an array subscript operator operator[](unsigned int) and an alias value_type describing its return value.
      Template Parameters
      - +
      indexThe index to be shifted to the end. Indices are counted from 0, thus the valid range is $0\le\text{index}<\text{rank}$.
      indexThe index to be shifted to the end. Indices are counted from 0, thus the valid range is $0\le\text{index}<\text{rank}$.
      rankRank of the tensorial object t
      TA tensorial object of rank rank. T must provide a local alias value_type and an index operator operator[]() that returns a (const or non-const) reference of value_type.
      @@ -261,12 +261,12 @@

      This function contracts two tensorial objects left and right and stores the result in result. The contraction is done over the last no_contr indices of both tensorial objects:

      -\[
+<picture><source srcset=\[
   \text{result}_{i_1,..,i_{r1},j_1,..,j_{r2}}
   = \sum_{k_1,..,k_{\mathrm{no\_contr}}}
     \mathrm{left}_{i_1,..,i_{r1},k_1,..,k_{\mathrm{no\_contr}}}
     \mathrm{right}_{j_1,..,j_{r2},k_1,..,k_{\mathrm{no\_contr}}}
-\] +\]" src="form_865.png"/>

      Calling this function is equivalent of writing the following low level code:

      for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
      ...
      @@ -321,12 +321,12 @@

      Full contraction of three tensorial objects:

      -\[
+<picture><source srcset=\[
   \sum_{i_1,..,i_{r1},j_1,..,j_{r2}}
   \text{left}_{i_1,..,i_{r1}}
   \text{middle}_{i_1,..,i_{r1},j_1,..,j_{r2}}
   \text{right}_{j_1,..,j_{r2}}
-\] +\]" src="form_866.png"/>

      Calling this function is equivalent of writing the following low level code:

      T1 result = T1();
      for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 2024-03-17 21:57:43.151232495 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 2024-03-17 21:57:43.155232520 +0000 @@ -134,8 +134,8 @@

      Return the elements of a continuous Givens rotation matrix and the norm of the input vector.

      -

      That is for a given pair x and y, return $c$ , $s$ and $\sqrt{x^2+y^2}$ such that

      -\[
+<p>That is for a given pair <code>x</code> and <code>y</code>, return <picture><source srcset=$c$ , $s$ and $\sqrt{x^2+y^2}$ such that

      +\[
 \begin{bmatrix}
 c  & s \\
 -s & c
@@ -149,7 +149,7 @@
 \sqrt{x^2+y^2} \\
 0
 \end{bmatrix}
-\] +\]" src="form_1964.png"/>

      Note
      The function is implemented for real valued numbers only.
      @@ -176,8 +176,8 @@

      Return the elements of a hyperbolic rotation matrix.

      -

      That is for a given pair x and y, return $c$ , $s$ and $r$ such that

      -\[
+<p>That is for a given pair <code>x</code> and <code>y</code>, return <picture><source srcset=$c$ , $s$ and $r$ such that

      +\[
 \begin{bmatrix}
 c  & -s \\
 -s & c
@@ -191,9 +191,9 @@
 r \\
 0
 \end{bmatrix}
-\] +\]" src="form_1965.png"/>

      -

      Real valued solution only exists if $|x|>|g|$, the function will throw an error otherwise.

      +

      Real valued solution only exists if $|x|>|g|$, the function will throw an error otherwise.

      Note
      The function is implemented for real valued numbers only.
      @@ -230,7 +230,7 @@ std::vector< double > * eigenvalues = nullptr&#href_anchor"memdoc"> -

      Estimate an upper bound for the largest eigenvalue of H by a k -step Lanczos process starting from the initial vector v0. Typical values of k are below 10. This estimator computes a k-step Lanczos decomposition $H V_k=V_k T_k+f_k e_k^T$ where $V_k$ contains k Lanczos basis, $V_k^TV_k=I_k$, $T_k$ is the tridiagonal Lanczos matrix, $f_k$ is a residual vector $f_k^TV_k=0$, and $e_k$ is the k-th canonical basis of $R^k$. The returned value is $ ||T_k||_2 + ||f_k||_2$. If eigenvalues is not nullptr, the eigenvalues of $T_k$ will be written there.

      +

      Estimate an upper bound for the largest eigenvalue of H by a k -step Lanczos process starting from the initial vector v0. Typical values of k are below 10. This estimator computes a k-step Lanczos decomposition $H V_k=V_k T_k+f_k e_k^T$ where $V_k$ contains k Lanczos basis, $V_k^TV_k=I_k$, $T_k$ is the tridiagonal Lanczos matrix, $f_k$ is a residual vector $f_k^TV_k=0$, and $e_k$ is the k-th canonical basis of $R^k$. The returned value is $ ||T_k||_2 + ||f_k||_2$. If eigenvalues is not nullptr, the eigenvalues of $T_k$ will be written there.

      vector_memory is used to allocate memory for temporary vectors. OperatorType has to provide vmult operation with VectorType.

      This function implements the algorithm from

      @article{Zhou2006,
      Title = {Self-consistent-field Calculations Using Chebyshev-filtered
      @@ -242,7 +242,7 @@
      Volume = {219},
      Pages = {172--184},
      }
      -
      Note
      This function uses Lapack routines to compute the largest eigenvalue of $T_k$.
      +
      Note
      This function uses Lapack routines to compute the largest eigenvalue of $T_k$.
      This function provides an alternate estimate to that obtained from several steps of SolverCG with SolverCG<VectorType>::connect_eigenvalues_slot().
      @@ -285,19 +285,19 @@ VectorMemory< VectorType > & vector_memory&#href_anchor"memdoc"> -

      Apply Chebyshev polynomial of the operator H to x. For a non-defective operator $H$ with a complete set of eigenpairs $H \psi_i = \lambda_i \psi_i$, the action of a polynomial filter $p$ is given by $p(H)x =\sum_i a_i p(\lambda_i) \psi_i$, where $x=: \sum_i a_i
-\psi_i$. Thus by appropriately choosing the polynomial filter, one can alter the eigenmodes contained in $x$.

      -

      This function uses Chebyshev polynomials of first kind. Below is an example of polynomial $T_n(x)$ of degree $n=8$ normalized to unity at $-1.2$.

      +

      Apply Chebyshev polynomial of the operator H to x. For a non-defective operator $H$ with a complete set of eigenpairs $H \psi_i = \lambda_i \psi_i$, the action of a polynomial filter $p$ is given by $p(H)x =\sum_i a_i p(\lambda_i) \psi_i$, where $x=: \sum_i a_i
+\psi_i$. Thus by appropriately choosing the polynomial filter, one can alter the eigenmodes contained in $x$.

      +

      This function uses Chebyshev polynomials of first kind. Below is an example of polynomial $T_n(x)$ of degree $n=8$ normalized to unity at $-1.2$.

      -

      By introducing a linear mapping $L$ from unwanted_spectrum to $[-1,1]$, we can dump the corresponding modes in x. The higher the polynomial degree $n$, the more rapid it grows outside of the $[-1,1]$. In order to avoid numerical overflow, we normalize polynomial filter to unity at tau. Thus, the filtered operator is $p(H) = T_n(L(H))/T_n(L(\tau))$.

      -

      The action of the Chebyshev filter only requires evaluation of vmult() of H and is based on the recursion equation for Chebyshev polynomial of degree $n$: $T_{n}(x) = 2x T_{n-1}(x) - T_{n-2}(x)$ with $T_0(x)=1$ and $T_1(x)=x$.

      +

      By introducing a linear mapping $L$ from unwanted_spectrum to $[-1,1]$, we can dump the corresponding modes in x. The higher the polynomial degree $n$, the more rapid it grows outside of the $[-1,1]$. In order to avoid numerical overflow, we normalize polynomial filter to unity at tau. Thus, the filtered operator is $p(H) = T_n(L(H))/T_n(L(\tau))$.

      +

      The action of the Chebyshev filter only requires evaluation of vmult() of H and is based on the recursion equation for Chebyshev polynomial of degree $n$: $T_{n}(x) = 2x T_{n-1}(x) - T_{n-2}(x)$ with $T_0(x)=1$ and $T_1(x)=x$.

      vector_memory is used to allocate memory for temporary objects.

      -

      This function implements the algorithm (with a minor fix of sign of $\sigma_1$) from

      @article{Zhou2014,
      +

      This function implements the algorithm (with a minor fix of sign of $\sigma_1$) from

      @article{Zhou2014,
      Title = {Chebyshev-filtered subspace iteration method free of sparse
      diagonalization for solving the Kohn--Sham equation},
      Author = {Zhou, Yunkai and Chelikowsky, James R and Saad, Yousef},
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 2024-03-17 21:57:43.187232717 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 2024-03-17 21:57:43.191232742 +0000 @@ -137,7 +137,7 @@

      Detailed Description

      A namespace for algorithms that implement the task of communicating in a dynamic-sparse way. In computer science, this is often called a consensus problem.

      -

      The problem consensus algorithms are trying to solve is this: Let's say you have $P$ processes that work together via MPI. Each (or at least some) of these want to send information to some of the other processes, or request information from other processes. No process knows which other process wants to communicate with them. The challenge is to determine who needs to talk to whom and what information needs to be sent, and to come up with an algorithm that ensures that this communication happens.

      +

      The problem consensus algorithms are trying to solve is this: Let's say you have $P$ processes that work together via MPI. Each (or at least some) of these want to send information to some of the other processes, or request information from other processes. No process knows which other process wants to communicate with them. The challenge is to determine who needs to talk to whom and what information needs to be sent, and to come up with an algorithm that ensures that this communication happens.

      That this is not a trivial problem can be seen by an analogy of the postal service. There, some senders may request information from some other participants in the postal service. So they send a letter that requests the information, but the recipients do not know how many such letters they need to expect (or that they should expect any at all). They also do not know how long they need to keep checking their mailbox for incoming requests. The recipients can be considered reliable, however: We can assume that everyone who is sent a request puts a letter with the answer in the mail. This time at least the recipients of these answers know that they are waiting for these answers because they have previously sent a request. They do not know in advance, however, when the answer will arrive and how long to wait. The goal of a consensus algorithm is then to come up with a strategy in which every participant can say who they want to send requests to, what that request is, and is then guaranteed an answer. The algorithm will only return when all requests by all participants have been answered and the answer delivered to the requesters.

      The problem is generally posed in terms of requests and answers. In practice, either of these two may be empty messages. For example, processes may simply want to send information to others that they know these others need; in this case, the "answer" message may be empty and its meaning is simply an affirmation that the information was received. Similarly, in some cases processes simply need to inform others that they want information, but the destination process knows what information is being requested (based on where in the program the request happens) and can send that information without there be any identifying information in the request; in that case, the request message may be empty and simply serve to identify the requester. (Each message can be queried for its sender.)

      As mentioned in the first paragraph, the algorithms we are interested in are "dynamic-sparse":

        /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 2024-03-17 21:57:43.311233483 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 2024-03-17 21:57:43.311233483 +0000 @@ -332,7 +332,7 @@

      • -

        Projection: compute the L2-projection of the given function onto the finite element space, i.e. if f is the function to be projected, compute fh in Vh such that (fh,vh)=(f,vh) for all discrete test functions vh. This is done through the solution of the linear system of equations M v = f where M is the mass matrix $m_{ij} = \int_\Omega \phi_i(x) \phi_j(x) dx$ and $f_i = \int_\Omega f(x) \phi_i(x) dx$. The solution vector $v$ then is the nodal representation of the projection fh. The project() functions are used in the step-21 and step-23 tutorial programs.

        +

        Projection: compute the L2-projection of the given function onto the finite element space, i.e. if f is the function to be projected, compute fh in Vh such that (fh,vh)=(f,vh) for all discrete test functions vh. This is done through the solution of the linear system of equations M v = f where M is the mass matrix $m_{ij} = \int_\Omega \phi_i(x) \phi_j(x) dx$ and $f_i = \int_\Omega f(x) \phi_i(x) dx$. The solution vector $v$ then is the nodal representation of the projection fh. The project() functions are used in the step-21 and step-23 tutorial programs.

        In order to get proper results, it be may necessary to treat boundary conditions right. Below are listed some cases where this may be needed. If needed, this is done by L2-projection of the trace of the given function onto the finite element space restricted to the boundary of the domain, then taking this information and using it to eliminate the boundary nodes from the mass matrix of the whole domain, using the MatrixTools::apply_boundary_values() function. The projection of the trace of the function to the boundary is done with the VectorTools::project_boundary_values() (see below) function, which is called with a map of boundary functions std::map<types::boundary_id, const Function<spacedim,number>*> in which all boundary indicators from zero to numbers::internal_face_boundary_id-1 (numbers::internal_face_boundary_id is used for other purposes, see the Triangulation class documentation) point to the function to be projected. The projection to the boundary takes place using a second quadrature formula on the boundary given to the project() function. The first quadrature formula is used to compute the right hand side and for numerical quadrature of the mass matrix.

        The projection of the boundary values first, then eliminating them from the global system of equations is not needed usually. It may be necessary if you want to enforce special restrictions on the boundary values of the projected function, for example in time dependent problems: you may want to project the initial values but need consistency with the boundary values for later times. Since the latter are projected onto the boundary in each time step, it is necessary that we also project the boundary values of the initial values, before projecting them to the whole domain.

        Obviously, the results of the two schemes for projection are different. Usually, when projecting to the boundary first, the L2-norm of the difference between original function and projection over the whole domain will be larger (factors of five have been observed) while the L2-norm of the error integrated over the boundary should of course be less. The reverse should also hold if no projection to the boundary is performed.

        @@ -342,17 +342,17 @@

      • -

        Creation of right hand side vectors: The create_right_hand_side() function computes the vector $f_i = \int_\Omega f(x) \phi_i(x) dx$. This is the same as what the MatrixCreator::create_* functions which take a right hand side do, but without assembling a matrix.

        +

        Creation of right hand side vectors: The create_right_hand_side() function computes the vector $f_i = \int_\Omega f(x) \phi_i(x) dx$. This is the same as what the MatrixCreator::create_* functions which take a right hand side do, but without assembling a matrix.

      • -

        Creation of right hand side vectors for point sources: The create_point_source_vector() function computes the vector $F_i =
-\int_\Omega \delta(x-x_0) \phi_i(x) dx$.

        +

        Creation of right hand side vectors for point sources: The create_point_source_vector() function computes the vector $F_i =
+\int_\Omega \delta(x-x_0) \phi_i(x) dx$.

      • -

        Creation of boundary right hand side vectors: The create_boundary_right_hand_side() function computes the vector $f_i =
-\int_{\partial\Omega} g(x) \phi_i(x) dx$. This is the right hand side contribution of boundary forces when having inhomogeneous Neumann boundary values in Laplace's equation or other second order operators. This function also takes an optional argument denoting over which parts of the boundary the integration shall extend. If the default argument is used, it is applied to all boundaries.

        +

        Creation of boundary right hand side vectors: The create_boundary_right_hand_side() function computes the vector $f_i =
+\int_{\partial\Omega} g(x) \phi_i(x) dx$. This is the right hand side contribution of boundary forces when having inhomogeneous Neumann boundary values in Laplace's equation or other second order operators. This function also takes an optional argument denoting over which parts of the boundary the integration shall extend. If the default argument is used, it is applied to all boundaries.

      • @@ -376,7 +376,7 @@

        The infinity norm of the difference on a given cell returns the maximum absolute value of the difference at the quadrature points given by the quadrature formula parameter. This will in some cases not be too good an approximation, since for example the Gauss quadrature formulae do not evaluate the difference at the end or corner points of the cells. You may want to choose a quadrature formula with more quadrature points or one with another distribution of the quadrature points in this case. You should also take into account the superconvergence properties of finite elements in some points: for example in 1d, the standard finite element method is a collocation method and should return the exact value at nodal points. Therefore, the trapezoidal rule should always return a vanishing L-infinity error. Conversely, in 2d the maximum L-infinity error should be located at the vertices or at the center of the cell, which would make it plausible to use the Simpson quadrature rule. On the other hand, there may be superconvergence at Gauss integration points. These examples are not intended as a rule of thumb, rather they are thought to illustrate that the use of the wrong quadrature formula may show a significantly wrong result and care should be taken to chose the right formula.

        The H1 seminorm is the L2 norm of the gradient of the difference. The square of the full H1 norm is the sum of the square of seminorm and the square of the L2 norm.

        To get the global L1 error, you have to sum up the entries in difference, e.g. using Vector::l1_norm() function. For the global L2 difference, you have to sum up the squares of the entries and take the root of the sum, e.g. using Vector::l2_norm(). These two operations represent the l1 and l2 norms of the vectors, but you need not take the absolute value of each entry, since the cellwise norms are already positive.

        -

        To get the global mean difference, simply sum up the elements as above. To get the $L_\infty$ norm, take the maximum of the vector elements, e.g. using the Vector::linfty_norm() function.

        +

        To get the global mean difference, simply sum up the elements as above. To get the $L_\infty$ norm, take the maximum of the vector elements, e.g. using the Vector::linfty_norm() function.

        For the global H1 norm and seminorm, the same rule applies as for the L2 norm: compute the l2 norm of the cell error vector.

        Note that, in the codimension one case, if you ask for a norm that requires the computation of a gradient, then the provided function is automatically projected along the curve, and the difference is only computed on the tangential part of the gradient, since no information is available on the normal component of the gradient anyway.

      • @@ -395,220 +395,220 @@
      -

      Denote which norm/integral is to be computed by the integrate_difference() function on each cell and compute_global_error() for the whole domain. Let $f:\Omega \rightarrow \mathbb{R}^c$ be a finite element function with $c$ components where component $c$ is denoted by $f_c$ and $\hat{f}$ be the reference function (the fe_function and exact_solution arguments to integrate_difference()). Let $e_c = \hat{f}_c - f_c$ be the difference or error between the two. Further, let $w:\Omega \rightarrow \mathbb{R}^c$ be the weight function of integrate_difference(), which is assumed to be equal to one if not supplied. Finally, let $p$ be the exponent argument (for $L_p$-norms).

      -

      In the following,we denote by $E_K$ the local error computed by integrate_difference() on cell $K$, whereas $E$ is the global error computed by compute_global_error(). Note that integrals are approximated by quadrature in the usual way:

      -\[
+<p>Denote which norm/integral is to be computed by the <a class=integrate_difference() function on each cell and compute_global_error() for the whole domain. Let $f:\Omega \rightarrow \mathbb{R}^c$ be a finite element function with $c$ components where component $c$ is denoted by $f_c$ and $\hat{f}$ be the reference function (the fe_function and exact_solution arguments to integrate_difference()). Let $e_c = \hat{f}_c - f_c$ be the difference or error between the two. Further, let $w:\Omega \rightarrow \mathbb{R}^c$ be the weight function of integrate_difference(), which is assumed to be equal to one if not supplied. Finally, let $p$ be the exponent argument (for $L_p$-norms).

      +

      In the following,we denote by $E_K$ the local error computed by integrate_difference() on cell $K$, whereas $E$ is the global error computed by compute_global_error(). Note that integrals are approximated by quadrature in the usual way:

      +\[
 \int_A f(x) dx \approx \sum_q f(x_q) \omega_q.
-\] +\]" src="form_2308.png"/>

      -

      Similarly for suprema over a cell $T$:

      -\[
+<p> Similarly for suprema over a cell <picture><source srcset=$T$:

      +\[
 \sup_{x\in T} |f(x)| dx \approx \max_q |f(x_q)|.
-\] +\]" src="form_2309.png"/>

      -
      Enumerator
      mean 

      The function or difference of functions is integrated on each cell $K$:

      -\[
+<picture><source srcset=\[
   E_K
 = \int_K \sum_c (\hat{f}_c - f_c) \, w_c
 = \int_K \sum_c e_c \, w_c
-\] +\]" src="form_2310.png"/>

      and summed up to get

      -\[
+<picture><source srcset=\[
   E = \sum_K E_K
     = \int_\Omega \sum_c (\hat{f}_c - f_c) \, w_c
-\] +\]" src="form_2311.png"/>

      -

      or, for $w \equiv 1$:

      -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

      +\[
   E = \int_\Omega (\hat{f} - f)
     = \int_\Omega e.
-\] +\]" src="form_2313.png"/>

      -

      Note: This differs from what is typically known as the mean of a function by a factor of $\frac{1}{|\Omega|}$. To compute the mean you can also use compute_mean_value(). Finally, pay attention to the sign: if $\hat{f}=0$, this will compute the negative of the mean of $f$.

      +

      Note: This differs from what is typically known as the mean of a function by a factor of $\frac{1}{|\Omega|}$. To compute the mean you can also use compute_mean_value(). Finally, pay attention to the sign: if $\hat{f}=0$, this will compute the negative of the mean of $f$.

      L1_norm 

      The absolute value of the function is integrated:

      -\[
+<picture><source srcset=\[
   E_K = \int_K \sum_c |e_c| \, w_c
-\] +\]" src="form_2316.png"/>

      and

      -\[
+<picture><source srcset=\[
   E = \sum_K E_K = \int_\Omega \sum_c |e_c| w_c,
-\] +\]" src="form_2317.png"/>

      -

      or, for $w \equiv 1$:

      -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

      +\[
   E  = \| e \|_{L^1}.
-\] +\]" src="form_2318.png"/>

      L2_norm 

      The square of the function is integrated and the square root of the result is computed on each cell:

      -\[
+<picture><source srcset=\[
   E_K = \sqrt{ \int_K \sum_c e_c^2 \, w_c }
-\] +\]" src="form_2319.png"/>

      and

      -\[
+<picture><source srcset=\[
   E = \sqrt{\sum_K E_K^2} = \sqrt{ \int_\Omega  \sum_c e_c^2 \, w_c }
-\] +\]" src="form_2320.png"/>

      -

      or, for $w \equiv 1$:

      -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

      +\[
   E = \sqrt{ \int_\Omega e^2 }
     = \| e \|_{L^2}
-\] +\]" src="form_2321.png"/>

      Lp_norm 

      The absolute value to the $p$-th power is integrated and the $p$-th root is computed on each cell. The exponent $p$ is the exponent argument of integrate_difference() and compute_mean_value():

      -\[
+<tr><td class=Lp_norm 

      The absolute value to the $p$-th power is integrated and the $p$-th root is computed on each cell. The exponent $p$ is the exponent argument of integrate_difference() and compute_mean_value():

      +\[
   E_K = \left( \int_K \sum_c |e_c|^p \, w_c \right)^{1/p}
-\] +\]" src="form_2322.png"/>

      and

      -\[
+<picture><source srcset=\[
   E = \left( \sum_K E_K^p \right)^{1/p}
-\] +\]" src="form_2323.png"/>

      -

      or, for $w \equiv 1$:

      -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

      +\[
   E = \| e \|_{L^p}.
-\] +\]" src="form_2324.png"/>

      Linfty_norm 

      The maximum absolute value of the function:

      -\[
+<picture><source srcset=\[
   E_K = \sup_K \max_c |e_c| \, w_c
-\] +\]" src="form_2325.png"/>

      and

      -\[
+<picture><source srcset=\[
   E = \max_K E_K
 = \sup_\Omega \max_c |e_c| \, w_c
-\] +\]" src="form_2326.png"/>

      -

      or, for $w \equiv 1$:

      -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

      +\[
   E  = \sup_\Omega \|e\|_\infty = \| e \|_{L^\infty}.
-\] +\]" src="form_2327.png"/>

      H1_seminorm 

      L2_norm of the gradient:

      -\[
+<picture><source srcset=\[
   E_K = \sqrt{ \int_K \sum_c (\nabla e_c)^2 \, w_c }
-\] +\]" src="form_2328.png"/>

      and

      -\[
+<picture><source srcset=\[
/usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html	2024-03-17 21:57:43.355233755 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html	2024-03-17 21:57:43.359233780 +0000
@@ -529,9 +529,9 @@
         </tr>
       </table>
 </div><div class= -

      Predict how the current error_indicators will adapt after refinement and coarsening were to happen on the provided dof_handler, and write its results to predicted_errors. Each entry of error_indicators and predicted_errors corresponds to an active cell on the underlying Triangulation, thus each container has to be of size Triangulation::n_active_cells(). The errors are interpreted to be measured in the energy norm; this assumption enters the rate of convergence that is used in the prediction. The $l_2$-norm of the output argument predicted_errors corresponds to the predicted global error after adaptation.

      +

      Predict how the current error_indicators will adapt after refinement and coarsening were to happen on the provided dof_handler, and write its results to predicted_errors. Each entry of error_indicators and predicted_errors corresponds to an active cell on the underlying Triangulation, thus each container has to be of size Triangulation::n_active_cells(). The errors are interpreted to be measured in the energy norm; this assumption enters the rate of convergence that is used in the prediction. The $l_2$-norm of the output argument predicted_errors corresponds to the predicted global error after adaptation.

      For p-adaptation, the local error is expected to converge exponentially with the polynomial degree of the assigned finite element. Each increase or decrease of the degree will thus change its value by a user-defined control parameter gamma_p.

      -

      For h-adaptation, we expect the local error $\eta_K$ on cell $K$ to be proportional to $(h_K)^{p_K}$ in the energy norm, where $h_K$ denotes the cell diameter and $p_K$ the polynomial degree of the currently assigned finite element on cell $K$.

      +

      For h-adaptation, we expect the local error $\eta_K$ on cell $K$ to be proportional to $(h_K)^{p_K}$ in the energy norm, where $h_K$ denotes the cell diameter and $p_K$ the polynomial degree of the currently assigned finite element on cell $K$.

      During h-coarsening, the finite elements on siblings may be different, and their parent cell will be assigned to their least dominating finite element that belongs to its most general child. Thus, we will always interpolate on an enclosing finite element space. Additionally assuming that the finite elements on the cells to be coarsened are sufficient to represent the solution correctly (e.g. at least quadratic basis functions for a quadratic solution), we are confident to say that the error will not change by sole interpolation on the larger finite element space.

      For p-adaptation, the local error is expected to converge exponentially with the polynomial degree of the assigned finite element. Each increase or decrease of the degree will thus change its value by a user-defined control parameter gamma_p. The assumption of exponential convergence is only valid if both h- and p-adaptive methods are combined in a sense that they are both utilized throughout a mesh, but do not have to be applied both on a cell simultaneously.

      The prediction algorithm is formulated as follows with control parameters gamma_p, gamma_h and gamma_n that may be used to influence prediction for each adaptation type individually. The results for each individual cell are stored in the predicted_errors output argument.

      @@ -553,7 +553,7 @@ \gamma_\text{p}^{(p_{K,\text{future}} - p_{K})}$" src="form_1500.png"/>

      On basis of the refinement history, we use the predicted error estimates to decide how cells will be adapted in the next adaptation step. Comparing the predicted error from the previous adaptation step to the error estimates of the current step allows us to justify whether our previous choice of adaptation was justified, and lets us decide how to adapt in the next one.

      -

      We thus have to transfer the predicted error from the old to the adapted mesh. When transferring the predicted error to the adapted mesh, make sure to configure your CellDataTransfer object with AdaptationStrategies::Refinement::l2_norm() as a refinement strategy and AdaptationStrategies::Coarsening::l2_norm() as a coarsening strategy. This ensures that the $l_2$-norm of the predict errors is preserved on both meshes.

      +

      We thus have to transfer the predicted error from the old to the adapted mesh. When transferring the predicted error to the adapted mesh, make sure to configure your CellDataTransfer object with AdaptationStrategies::Refinement::l2_norm() as a refinement strategy and AdaptationStrategies::Coarsening::l2_norm() as a coarsening strategy. This ensures that the $l_2$-norm of the predict errors is preserved on both meshes.

      In this context, we assume that the local error on a cell to be h-refined will be divided equally on all of its $n_{K_c}$ children, whereas local errors on siblings will be summed up on the parent cell in case of h-coarsening. This assumption is often not satisfied in practice: For example, if a cell is at a corner singularity, then the one child cell that ends up closest to the singularity will inherit the majority of the remaining error – but this function can not know where the singularity will be, and consequently assumes equal distribution.

      Incorporating the transfer from the old to the adapted mesh, the complete error prediction algorithm reads as follows:

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 2024-03-17 21:57:43.471234471 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 2024-03-17 21:57:43.475234496 +0000 @@ -853,8 +853,8 @@
      const double coordinate_value&#href_anchor"memdoc">

      Creates a (dim + 1)-dimensional point by copying over the coordinates of the incoming dim-dimensional point and setting the "missing" (dim + 1)-dimensional component to the incoming coordinate value.

      -

      For example, given the input $\{(x, y), 2, z \}$ this function creates the point $(x, y, z)$.

      -

      The coordinates of the dim-dimensional point are written to the coordinates of the (dim + 1)-dimensional point in the order of the convention given by the function coordinate_to_one_dim_higher. Thus, the order of coordinates on the lower-dimensional point are not preserved: $\{(z, x), 1, y \}$ creates the point $(x, y, z)$.

      +

      For example, given the input $\{(x, y), 2, z \}$ this function creates the point $(x, y, z)$.

      +

      The coordinates of the dim-dimensional point are written to the coordinates of the (dim + 1)-dimensional point in the order of the convention given by the function coordinate_to_one_dim_higher. Thus, the order of coordinates on the lower-dimensional point are not preserved: $\{(z, x), 1, y \}$ creates the point $(x, y, z)$.

      Definition at line 24 of file function_restriction.cc.

      @@ -1725,7 +1725,7 @@
      -

      Compute the polynomial interpolation of a tensor product shape function $\varphi_i$ given a vector of coefficients $u_i$ in the form $u_h(\mathbf{x}) = \sum_{i=1}^{k^d} \varphi_i(\mathbf{x}) u_i$. The shape functions $\varphi_i(\mathbf{x}) =
+<p>Compute the polynomial interpolation of a tensor product shape function <picture><source srcset=$\varphi_i$ given a vector of coefficients $u_i$ in the form $u_h(\mathbf{x}) = \sum_{i=1}^{k^d} \varphi_i(\mathbf{x}) u_i$. The shape functions $\varphi_i(\mathbf{x}) =
 \prod_{d=1}^{\text{dim}}\varphi_{i_d}^\text{1d}(x_d)$ represent a tensor product. The function returns a pair with the value of the interpolation as the first component and the gradient in reference coordinates as the second component. Note that for compound types (e.g. the values field begin a Point<spacedim> argument), the components of the gradient are sorted as Tensor<1, dim, Tensor<1, spacedim>> with the derivatives as the first index; this is a consequence of the generic arguments in the function.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html 2024-03-17 21:57:43.507234694 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html 2024-03-17 21:57:43.515234743 +0000 @@ -383,7 +383,7 @@
      const unsigned int grainsize&#href_anchor"memdoc">

      This function works a lot like the apply_to_subranges() function, but it allows to accumulate numerical results computed on each subrange into one number. The type of this number is given by the ResultType template argument that needs to be explicitly specified.

      -

      An example of use of this function is to compute the value of the expression $x^T A x$ for a square matrix $A$ and a vector $x$. The sum over rows can be parallelized and the whole code might look like this:

      void matrix_norm (const FullMatrix &A,
      +

      An example of use of this function is to compute the value of the expression $x^T A x$ for a square matrix $A$ and a vector $x$. The sum over rows can be parallelized and the whole code might look like this:

      void matrix_norm (const FullMatrix &A,
      const Vector &x)
      {
      return
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html 2024-03-17 21:57:43.547234941 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html 2024-03-17 21:57:43.555234991 +0000 @@ -303,7 +303,7 @@

      This program obviously does not have a whole lot of functionality, but in particular the second_grid function has a bunch of places where you can play with it. For example, you could modify the criterion by which we decide which cells to refine. An example would be to change the condition to this:

      for (auto &cell: triangulation.active_cell_iterators())
      if (cell->center()[1] > 0)
      cell->set_refine_flag ();
      -

      This would refine all cells for which the $y$-coordinate of the cell's center is greater than zero (the TriaAccessor::center function that we call by dereferencing the cell iterator returns a Point<2> object; subscripting [0] would give the $x$-coordinate, subscripting [1] the $y$-coordinate). By looking at the functions that TriaAccessor provides, you can also use more complicated criteria for refinement.

      +

      This would refine all cells for which the $y$-coordinate of the cell's center is greater than zero (the TriaAccessor::center function that we call by dereferencing the cell iterator returns a Point<2> object; subscripting [0] would give the $x$-coordinate, subscripting [1] the $y$-coordinate). By looking at the functions that TriaAccessor provides, you can also use more complicated criteria for refinement.

      In general, what you can do with operations of the form cell->something() is a bit difficult to find in the documentation because cell is not a pointer but an iterator. The functions you can call on a cell can be found in the documentation of the classes TriaAccessor (which has functions that can also be called on faces of cells or, more generally, all sorts of geometric objects that appear in a triangulation), and CellAccessor (which adds a few functions that are specific to cells).

      A more thorough description of the whole iterator concept can be found in the Iterators on mesh-like containers documentation module.

      Different geometries

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 2024-03-17 21:57:43.599235262 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 2024-03-17 21:57:43.591235212 +0000 @@ -110,10 +110,10 @@
    3. The plain program
    4. Introduction

      -

      This is a rather short example which only shows some aspects of using higher order mappings. By mapping we mean the transformation between the unit cell (i.e. the unit line, square, or cube) to the cells in real space. In all the previous examples, we have implicitly used linear or d-linear mappings; you will not have noticed this at all, since this is what happens if you do not do anything special. However, if your domain has curved boundaries, there are cases where the piecewise linear approximation of the boundary (i.e. by straight line segments) is not sufficient, and you want that your computational domain is an approximation to the real domain using curved boundaries as well. If the boundary approximation uses piecewise quadratic parabolas to approximate the true boundary, then we say that this is a quadratic or $Q_2$ approximation. If we use piecewise graphs of cubic polynomials, then this is a $Q_3$ approximation, and so on.

      +

      This is a rather short example which only shows some aspects of using higher order mappings. By mapping we mean the transformation between the unit cell (i.e. the unit line, square, or cube) to the cells in real space. In all the previous examples, we have implicitly used linear or d-linear mappings; you will not have noticed this at all, since this is what happens if you do not do anything special. However, if your domain has curved boundaries, there are cases where the piecewise linear approximation of the boundary (i.e. by straight line segments) is not sufficient, and you want that your computational domain is an approximation to the real domain using curved boundaries as well. If the boundary approximation uses piecewise quadratic parabolas to approximate the true boundary, then we say that this is a quadratic or $Q_2$ approximation. If we use piecewise graphs of cubic polynomials, then this is a $Q_3$ approximation, and so on.

      For some differential equations, it is known that piecewise linear approximations of the boundary, i.e. $Q_1$ mappings, are not sufficient if the boundary of the exact domain is curved. Examples are the biharmonic equation using $C^1$ elements, or the Euler equations of gas dynamics on domains with curved reflective boundaries. In these cases, it is necessary to compute the integrals using a higher order mapping. If we do not use such a higher order mapping, the order of approximation of the boundary dominates the order of convergence of the entire numerical scheme, irrespective of the order of convergence of the discretization in the interior of the domain.

      -

      Rather than demonstrating the use of higher order mappings with one of these more complicated examples, we do only a brief computation: calculating the value of $\pi=3.141592653589793238462643\ldots$ by two different methods.

      -

      The first method uses a triangulated approximation of the circle with unit radius and integrates a unit magnitude constant function ( $f = 1$) over it. Of course, if the domain were the exact unit circle, then the area would be $\pi$, but since we only use an approximation by piecewise polynomial segments, the value of the area we integrate over is not exactly $\pi$. However, it is known that as we refine the triangulation, a $Q_p$ mapping approximates the boundary with an order $h^{p+1}$, where $h$ is the mesh size. We will check the values of the computed area of the circle and their convergence towards $\pi$ under mesh refinement for different mappings. We will also find a convergence behavior that is surprising at first, but has a good explanation.

      +

      Rather than demonstrating the use of higher order mappings with one of these more complicated examples, we do only a brief computation: calculating the value of $\pi=3.141592653589793238462643\ldots$ by two different methods.

      +

      The first method uses a triangulated approximation of the circle with unit radius and integrates a unit magnitude constant function ( $f = 1$) over it. Of course, if the domain were the exact unit circle, then the area would be $\pi$, but since we only use an approximation by piecewise polynomial segments, the value of the area we integrate over is not exactly $\pi$. However, it is known that as we refine the triangulation, a $Q_p$ mapping approximates the boundary with an order $h^{p+1}$, where $h$ is the mesh size. We will check the values of the computed area of the circle and their convergence towards $\pi$ under mesh refinement for different mappings. We will also find a convergence behavior that is surprising at first, but has a good explanation.

      The second method works similarly, but this time does not use the area of the triangulated unit circle, but rather its perimeter. $\pi$ is then approximated by half of the perimeter, as we choose the radius equal to one.

      Note
      This tutorial shows in essence how to choose a particular mapping for integrals, by attaching a particular geometry to the triangulation (as had already been done in step-1, for example) and then passing a mapping argument to the FEValues class that is used for all integrals in deal.II. The geometry we choose is a circle, for which deal.II already has a class (SphericalManifold) that can be used. If you want to define your own geometry, for example because it is complicated and cannot be described by the classes already available in deal.II, you will want to read through step-53.

      The commented program

      @@ -157,7 +157,7 @@
      void hyper_ball(Triangulation< dim > &tria, const Point< dim > &center=Point< dim >(), const double radius=1., const bool attach_spherical_manifold_on_boundary_cells=false)
      const ::parallel::distributed::Triangulation< dim, spacedim > * triangulation
      -

      Then alternate between generating output on the current mesh for $Q_1$, $Q_2$, and $Q_3$ mappings, and (at the end of the loop body) refining the mesh once globally.

      +

      Then alternate between generating output on the current mesh for $Q_1$, $Q_2$, and $Q_3$ mappings, and (at the end of the loop body) refining the mesh once globally.

        for (unsigned int refinement = 0; refinement < 2; ++refinement)
        {
        std::cout << "Refinement level: " << refinement << std::endl;
      @@ -194,9 +194,9 @@
        }
        }
       
      -

      Now we proceed with the main part of the code, the approximation of $\pi$. The area of a circle is of course given by $\pi r^2$, so having a circle of radius 1, the area represents just the number that is searched for. The numerical computation of the area is performed by integrating the constant function of value 1 over the whole computational domain, i.e. by computing the areas $\int_K 1 dx=\int_{\hat K} 1
+</div><!-- fragment --><p>Now we proceed with the main part of the code, the approximation of <picture><source srcset=$\pi$. The area of a circle is of course given by $\pi r^2$, so having a circle of radius 1, the area represents just the number that is searched for. The numerical computation of the area is performed by integrating the constant function of value 1 over the whole computational domain, i.e. by computing the areas $\int_K 1 dx=\int_{\hat K} 1
    \ \textrm{det}\ J(\hat x) d\hat x \approx \sum_i \textrm{det}
-   \ J(\hat x_i)w(\hat x_i)$, where the sum extends over all quadrature points on all active cells in the triangulation, with $w(x_i)$ being the weight of quadrature point $x_i$. The integrals on each cell are approximated by numerical quadrature, hence the only additional ingredient we need is to set up a FEValues object that provides the corresponding JxW values of each cell. (Note that JxW is meant to abbreviate Jacobian determinant times weight; since in numerical quadrature the two factors always occur at the same places, we only offer the combined quantity, rather than two separate ones.) We note that here we won't use the FEValues object in its original purpose, i.e. for the computation of values of basis functions of a specific finite element at certain quadrature points. Rather, we use it only to gain the JxW at the quadrature points, irrespective of the (dummy) finite element we will give to the constructor of the FEValues object. The actual finite element given to the FEValues object is not used at all, so we could give any.

      + \ J(\hat x_i)w(\hat x_i)$" src="form_2751.png"/>, where the sum extends over all quadrature points on all active cells in the triangulation, with $w(x_i)$ being the weight of quadrature point $x_i$. The integrals on each cell are approximated by numerical quadrature, hence the only additional ingredient we need is to set up a FEValues object that provides the corresponding JxW values of each cell. (Note that JxW is meant to abbreviate Jacobian determinant times weight; since in numerical quadrature the two factors always occur at the same places, we only offer the combined quantity, rather than two separate ones.) We note that here we won't use the FEValues object in its original purpose, i.e. for the computation of values of basis functions of a specific finite element at certain quadrature points. Rather, we use it only to gain the JxW at the quadrature points, irrespective of the (dummy) finite element we will give to the constructor of the FEValues object. The actual finite element given to the FEValues object is not used at all, so we could give any.

        template <int dim>
        void compute_pi_by_area()
        {
      @@ -280,7 +280,7 @@
        }
       
       
      -

      The following, second function also computes an approximation of $\pi$ but this time via the perimeter $2\pi r$ of the domain instead of the area. This function is only a variation of the previous function. So we will mainly give documentation for the differences.

      +

      The following, second function also computes an approximation of $\pi$ but this time via the perimeter $2\pi r$ of the domain instead of the area. This function is only a variation of the previous function. So we will mainly give documentation for the differences.

        template <int dim>
        void compute_pi_by_perimeter()
        {
      @@ -404,11 +404,11 @@
      unset ytics
      plot [-1:1][-1:1] "ball_0_mapping_q_1.dat" lw 4 lt rgb "black"

      or using one of the other filenames. The second line makes sure that the aspect ratio of the generated output is actually 1:1, i.e. a circle is drawn as a circle on your screen, rather than as an ellipse. The third line switches off the key in the graphic, as that will only print information (the filename) which is not that important right now. Similarly, the fourth and fifth disable tick marks. The plot is then generated with a specific line width ("lw", here set to 4) and line type ("lt", here chosen by saying that the line should be drawn using the RGB color "black").

      -

      The following table shows the triangulated computational domain for $Q_1$, $Q_2$, and $Q_3$ mappings, for the original coarse grid (left), and a once uniformly refined grid (right).

      +

      The following table shows the triangulated computational domain for $Q_1$, $Q_2$, and $Q_3$ mappings, for the original coarse grid (left), and a once uniformly refined grid (right).

      Five-cell discretization of the disk.
      20-cell discretization of the disk (i.e., five cells
               refined once).
      Five-cell discretization of the disk with quadratic edges. The
               boundary is nearly indistinguishable from the actual circle.
      20-cell discretization with quadratic edges.
      Five-cell discretization of the disk with cubic edges. The
-              boundary is nearly indistinguishable from the actual circle.
      20-cell discretization with cubic edges.

      These pictures show the obvious advantage of higher order mappings: they approximate the true boundary quite well also on rather coarse meshes. To demonstrate this a little further, here is part of the upper right quarter circle of the coarse meshes with $Q_2$ and $Q_3$ mappings, where the dashed red line marks the actual circle:

      + boundary is nearly indistinguishable from the actual circle." style="pointer-events: none;" width="400" height="400" class="inline"/>
      20-cell discretization with cubic edges.

      These pictures show the obvious advantage of higher order mappings: they approximate the true boundary quite well also on rather coarse meshes. To demonstrate this a little further, here is part of the upper right quarter circle of the coarse meshes with $Q_2$ and $Q_3$ mappings, where the dashed red line marks the actual circle:

      Close-up of quadratic discretization. The distance between the
          quadratic interpolant and the actual circle is small.
      Close-up of cubic discretization. The distance between the
          cubic interpolant and the actual circle is very small.

      Obviously the quadratic mapping approximates the boundary quite well, while for the cubic mapping the difference between approximated domain and true one is hardly visible already for the coarse grid. You can also see that the mapping only changes something at the outer boundaries of the triangulation. In the interior, all lines are still represented by linear functions, resulting in additional computations only on cells at the boundary. Higher order mappings are therefore usually not noticeably slower than lower order ones, because the additional computations are only performed on a small subset of all cells.

      @@ -500,14 +500,14 @@
      1280 3.1415926535897896 3.5527e-15 3.32
      5120 3.1415926535897940 8.8818e-16 2.00
      unsigned int level
      Definition grid_out.cc:4618
      -
      Note
      Once the error reaches a level on the order of $10^{-13}$ to $10^{-15}$, it is essentially dominated by round-off and consequently dominated by what precisely the library is doing in internal computations. Since these things change, the precise values and errors change from release to release at these round-off levels, though the overall order of errors should of course remain the same. See also the comment below in the section on Possibilities for extensions about how to compute these results more accurately.
      -

      One of the immediate observations from the output above is that in all cases the values converge quickly to the true value of $\pi=3.141592653589793238462643$. Note that for the $Q_4$ mapping, we are already in the regime of roundoff errors and the convergence rate levels off, which is already quite a lot. However, also note that for the $Q_1$ mapping, even on the finest grid the accuracy is significantly worse than on the coarse grid for a $Q_3$ mapping!

      -

      The last column of the output shows the convergence order, in powers of the mesh width $h$. In the introduction, we had stated that the convergence order for a $Q_p$ mapping should be $h^{p+1}$. However, in the example shown, the order is rather $h^{2p}$! This at first surprising fact is explained by the properties of the $Q_p$ mapping. At order p, it uses support points that are based on the p+1 point Gauss-Lobatto quadrature rule that selects the support points in such a way that the quadrature rule converges at order 2p. Even though these points are here only used for interpolation of a pth order polynomial, we get a superconvergence effect when numerically evaluating the integral, resulting in the observed high order of convergence. (This effect is also discussed in detail in the following publication: A. Bonito, A. Demlow, and J. Owen: "A priori error +

      Note
      Once the error reaches a level on the order of $10^{-13}$ to $10^{-15}$, it is essentially dominated by round-off and consequently dominated by what precisely the library is doing in internal computations. Since these things change, the precise values and errors change from release to release at these round-off levels, though the overall order of errors should of course remain the same. See also the comment below in the section on Possibilities for extensions about how to compute these results more accurately.
      +

      One of the immediate observations from the output above is that in all cases the values converge quickly to the true value of $\pi=3.141592653589793238462643$. Note that for the $Q_4$ mapping, we are already in the regime of roundoff errors and the convergence rate levels off, which is already quite a lot. However, also note that for the $Q_1$ mapping, even on the finest grid the accuracy is significantly worse than on the coarse grid for a $Q_3$ mapping!

      +

      The last column of the output shows the convergence order, in powers of the mesh width $h$. In the introduction, we had stated that the convergence order for a $Q_p$ mapping should be $h^{p+1}$. However, in the example shown, the order is rather $h^{2p}$! This at first surprising fact is explained by the properties of the $Q_p$ mapping. At order p, it uses support points that are based on the p+1 point Gauss-Lobatto quadrature rule that selects the support points in such a way that the quadrature rule converges at order 2p. Even though these points are here only used for interpolation of a pth order polynomial, we get a superconvergence effect when numerically evaluating the integral, resulting in the observed high order of convergence. (This effect is also discussed in detail in the following publication: A. Bonito, A. Demlow, and J. Owen: "A priori error estimates for finite element approximations to eigenvalues and eigenfunctions of the Laplace-Beltrami operator", submitted, 2018.)

      Possibilities for extensions

      -

      As the table of numbers copied from the output of the program shows above, it is not very difficult to compute the value of $\pi$ to 13 or 15 digits. But, the output also shows that once we approach the level of accuracy with which double precision numbers store information (namely, with roughly 16 digits of accuracy), we no longer see the expected convergence order and the error no longer decreases with mesh refinement as anticipated. This is because both within this code and within the many computations that happen within deal.II itself, each operation incurs an error on the order of $10^{-16}$; adding such errors many times over then results in an error that may be on the order of $10^{-14}$, which will dominate the discretization error after a number of refinement steps and consequently destroy the convergence rate.

      +

      As the table of numbers copied from the output of the program shows above, it is not very difficult to compute the value of $\pi$ to 13 or 15 digits. But, the output also shows that once we approach the level of accuracy with which double precision numbers store information (namely, with roughly 16 digits of accuracy), we no longer see the expected convergence order and the error no longer decreases with mesh refinement as anticipated. This is because both within this code and within the many computations that happen within deal.II itself, each operation incurs an error on the order of $10^{-16}$; adding such errors many times over then results in an error that may be on the order of $10^{-14}$, which will dominate the discretization error after a number of refinement steps and consequently destroy the convergence rate.

      The question is whether one can do anything about this. One thought is to use a higher-precision data type. For example, one could think of declaring both the area and perimeter variables in compute_pi_by_area() and compute_pi_by_perimeter() with data type long double. long double is a data type that is not well specified in the C++ standard but at least on Intel processors has around 19, instead of around 16, digits of accuracy. If we were to do that, we would get results that differ from the ones shown above. However, maybe counter-intuitively, they are not uniformly better. For example, when computing $\pi$ by the area, at the time of writing these sentences we get these values with double precision for degree 4:

      5 3.1415871927401144 5.4608e-06 -
      20 3.1415926314742491 2.2116e-08 7.95
      80 3.1415926535026268 8.7166e-11 7.99
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html 2024-03-17 21:57:43.643235535 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html 2024-03-17 21:57:43.643235535 +0000 @@ -110,55 +110,55 @@

      Introduction

      The problem we will be considering is the solution of Laplace's problem with Neumann boundary conditions only:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   -\Delta u &=& f \qquad \mathrm{in}\ \Omega,
   \\
   \partial_n u &=& g \qquad \mathrm{on}\ \partial\Omega.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2760.png"/>

      It is well known that if this problem is to have a solution, then the forces need to satisfy the compatibility condition

      -\[
+<picture><source srcset=\[
   \int_\Omega f\; dx + \int_{\partial\Omega} g\; ds = 0.
-\] +\]" src="form_2761.png"/>

      -

      We will consider the special case that $\Omega$ is the circle of radius 1 around the origin, and $f=-2$, $g=1$. This choice satisfies the compatibility condition.

      +

      We will consider the special case that $\Omega$ is the circle of radius 1 around the origin, and $f=-2$, $g=1$. This choice satisfies the compatibility condition.

      The compatibility condition allows a solution of the above equation, but it nevertheless retains an ambiguity: since only derivatives of the solution appear in the equations, the solution is only determined up to a constant. For this reason, we have to pose another condition for the numerical solution, which fixes this constant.

      For this, there are various possibilities:

      1. -

        Fix one node of the discretization to zero or any other fixed value. This amounts to an additional condition $u_h(x_0)=0$. Although this is common practice, it is not necessarily a good idea, since we know that the solutions of Laplace's equation are only in $H^1$, which does not allow for the definition of point values because it is not a subset of the continuous functions. Therefore, even though fixing one node is allowed for discretized functions, it is not for continuous functions, and one can often see this in a resulting error spike at this point in the numerical solution.

        +

        Fix one node of the discretization to zero or any other fixed value. This amounts to an additional condition $u_h(x_0)=0$. Although this is common practice, it is not necessarily a good idea, since we know that the solutions of Laplace's equation are only in $H^1$, which does not allow for the definition of point values because it is not a subset of the continuous functions. Therefore, even though fixing one node is allowed for discretized functions, it is not for continuous functions, and one can often see this in a resulting error spike at this point in the numerical solution.

      2. -

        Fixing the mean value over the domain to zero or any other value. This is allowed on the continuous level, since $H^1(\Omega)\subset L^1(\Omega)$ by Sobolev's inequality, and thus also on the discrete level since we there only consider subsets of $H^1$.

        +

        Fixing the mean value over the domain to zero or any other value. This is allowed on the continuous level, since $H^1(\Omega)\subset L^1(\Omega)$ by Sobolev's inequality, and thus also on the discrete level since we there only consider subsets of $H^1$.

      3. -Fixing the mean value over the boundary of the domain to zero or any other value. This is also allowed on the continuous level, since $H^{1/2}(\partial\Omega)\subset L^1(\partial\Omega)$, again by Sobolev's inequality.
      4. +Fixing the mean value over the boundary of the domain to zero or any other value. This is also allowed on the continuous level, since $H^{1/2}(\partial\Omega)\subset L^1(\partial\Omega)$, again by Sobolev's inequality.

      We will choose the last possibility, since we want to demonstrate another technique with it.

      While this describes the problem to be solved, we still have to figure out how to implement it. Basically, except for the additional mean value constraint, we have solved this problem several times, using Dirichlet boundary values, and we only need to drop the treatment of Dirichlet boundary nodes. The use of higher order mappings is also rather trivial and will be explained at the various places where we use it; in almost all conceivable cases, you will only consider the objects describing mappings as a black box which you need not worry about, because their only uses seem to be to be passed to places deep inside the library where functions know how to handle them (i.e. in the FEValues classes and their descendants).

      The tricky point in this program is the use of the mean value constraint. Fortunately, there is a class in the library which knows how to handle such constraints, and we have used it quite often already, without mentioning its generality. Note that if we assume that the boundary nodes are spaced equally along the boundary, then the mean value constraint

      -\[
+<picture><source srcset=\[
   \int_{\partial \Omega} u(x) \; ds = 0
-\] +\]" src="form_2767.png"/>

      can be written as

      -\[
+<picture><source srcset=\[
   \sum_{i\in\partial\Omega_h} u_i = 0,
-\] +\]" src="form_2768.png"/>

      -

      where the sum shall run over all degree of freedom indices which are located on the boundary of the computational domain. Let us denote by $i_0$ that index on the boundary with the lowest number (or any other conveniently chosen index), then the constraint can also be represented by

      -\[
+<p> where the sum shall run over all degree of freedom indices which are located on the boundary of the computational domain. Let us denote by <picture><source srcset=$i_0$ that index on the boundary with the lowest number (or any other conveniently chosen index), then the constraint can also be represented by

      +\[
   u_{i_0} = \sum_{i\in\partial\Omega_h\backslash i_0} -u_i.
-\] +\]" src="form_2770.png"/>

      This, luckily, is exactly the form of constraints for which the AffineConstraints class was designed. Note that we have used this class in several previous examples for the representation of hanging nodes constraints, which also have this form: there, the middle vertex shall have the mean of the values of the adjacent vertices. In general, the AffineConstraints class is designed to handle affine constraints of the form

      -\[
+<picture><source srcset=\[
   CU = b
-\] +\]" src="form_2771.png"/>

      where $C$ denotes a matrix, $b$ denotes a vector, and $U$ the vector of nodal values. In this case, since $C$ represents one homogeneous constraint, $b$ is the zero vector.

      -

      In this example, the mean value along the boundary allows just such a representation, with $C$ being a matrix with just one row (i.e. there is only one constraint). In the implementation, we will create an AffineConstraints object, add one constraint (i.e. add another row to the matrix) referring to the first boundary node $i_0$, and insert the weights with which all the other nodes contribute, which in this example happens to be just $-1$.

      +

      In this example, the mean value along the boundary allows just such a representation, with $C$ being a matrix with just one row (i.e. there is only one constraint). In the implementation, we will create an AffineConstraints object, add one constraint (i.e. add another row to the matrix) referring to the first boundary node $i_0$, and insert the weights with which all the other nodes contribute, which in this example happens to be just $-1$.

      Later, we will use this object to eliminate the first boundary node from the linear system of equations, reducing it to one which has a solution without the ambiguity of the constant shift value. One of the problems of the implementation will be that the explicit elimination of this node results in a number of additional elements in the matrix, of which we do not know in advance where they are located and how many additional entries will be in each of the rows of the matrix. We will show how we can use an intermediate object to work around this problem.

      But now on to the implementation of the program solving this problem...

      The commented program

      @@ -324,8 +324,8 @@
      ::VectorizedArray< Number, width > max(const ::VectorizedArray< Number, width > &, const ::VectorizedArray< Number, width > &)

      That's quite simple, right?

      Two remarks are in order, though: First, these functions are used in a lot of contexts. Maybe you want to create a Laplace or mass matrix for a vector values finite element; or you want to use the default Q1 mapping; or you want to assembled the matrix with a coefficient in the Laplace operator. For this reason, there are quite a large number of variants of these functions in the MatrixCreator and MatrixTools namespaces. Whenever you need a slightly different version of these functions than the ones called above, it is certainly worthwhile to take a look at the documentation and to check whether something fits your needs.

      -

      The second remark concerns the quadrature formula we use: we want to integrate over bilinear shape functions, so we know that we have to use at least an order two Gauss quadrature formula. On the other hand, we want the quadrature rule to have at least the order of the boundary approximation. Since the order of Gauss rule with $r$ points is $2r -
-   1$, and the order of the boundary approximation using polynomials of degree $p$ is $p+1$, we know that $2r \geq p$. Since r has to be an integer and (as mentioned above) has to be at least $2$, this makes up for the formula above computing gauss_degree.

      +

      The second remark concerns the quadrature formula we use: we want to integrate over bilinear shape functions, so we know that we have to use at least an order two Gauss quadrature formula. On the other hand, we want the quadrature rule to have at least the order of the boundary approximation. Since the order of Gauss rule with $r$ points is $2r -
+   1$, and the order of the boundary approximation using polynomials of degree $p$ is $p+1$, we know that $2r \geq p$. Since r has to be an integer and (as mentioned above) has to be at least $2$, this makes up for the formula above computing gauss_degree.

      Since the generation of the body force contributions to the right hand side vector was so simple, we do that all over again for the boundary forces as well: allocate a vector of the right size and call the right function. The boundary function has constant values, so we can generate an object from the library on the fly, and we use the same quadrature formula as above, but this time of lower dimension since we integrate over faces now instead of cells:

        Vector<double> tmp(system_rhs.size());
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 2024-03-17 21:57:43.695235855 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 2024-03-17 21:57:43.699235880 +0000 @@ -136,50 +136,50 @@

      The particular concern of this program are the loops of DG methods. These turn out to be especially complex, primarily because for the face terms, we have to distinguish the cases of boundary, regular interior faces and interior faces with hanging nodes, respectively. The MeshWorker::mesh_loop() handles the complexity on iterating over cells and faces and allows specifying "workers" for the different cell and face terms. The integration of face terms itself, including on adaptively refined faces, is done using the FEInterfaceValues class.

      The equation

      The model problem solved in this example is the linear advection equation

      -\[
+<picture><source srcset=\[
   \nabla\cdot \left({\mathbf \beta} u\right)=0 \qquad\mbox{in }\Omega,
-\] +\]" src="form_2774.png"/>

      subject to the boundary conditions

      -\[
+<picture><source srcset=\[
 u=g\quad\mbox{on }\Gamma_-,
-\] +\]" src="form_2775.png"/>

      -

      on the inflow part $\Gamma_-$ of the boundary $\Gamma=\partial\Omega$ of the domain. Here, ${\mathbf \beta}={\mathbf \beta}({\bf x})$ denotes a vector field, $u$ the (scalar) solution function, $g$ a boundary value function,

      -\[
+<p> on the inflow part <picture><source srcset=$\Gamma_-$ of the boundary $\Gamma=\partial\Omega$ of the domain. Here, ${\mathbf \beta}={\mathbf \beta}({\bf x})$ denotes a vector field, $u$ the (scalar) solution function, $g$ a boundary value function,

      +\[
 \Gamma_- \dealcoloneq \{{\bf x}\in\Gamma, {\mathbf \beta}({\bf x})\cdot{\bf n}({\bf x})<0\}
-\] +\]" src="form_2779.png"/>

      -

      the inflow part of the boundary of the domain and ${\bf n}$ denotes the unit outward normal to the boundary $\Gamma$. This equation is the conservative version of the advection equation already considered in step-9 of this tutorial.

      -

      On each cell $T$, we multiply by a test function $v_h$ from the left and integrate by parts to get:

      -\[
+<p> the inflow part of the boundary of the domain and <picture><source srcset=${\bf n}$ denotes the unit outward normal to the boundary $\Gamma$. This equation is the conservative version of the advection equation already considered in step-9 of this tutorial.

      +

      On each cell $T$, we multiply by a test function $v_h$ from the left and integrate by parts to get:

      +\[
   \left( v_h, \nabla \cdot (\beta u_h) \right)_T
 = -(\nabla v_h, \beta u_h) + \int_{\partial T} v_h u_h \beta \cdot n
-\] +\]" src="form_2782.png"/>

      -

      When summing this expression over all cells $T$, the boundary integral is done over all internal and external faces and as such there are three cases:

        +

        When summing this expression over all cells $T$, the boundary integral is done over all internal and external faces and as such there are three cases:

        1. -outer boundary on the inflow (we replace $u_h$ by given $g$): $\int_{\Gamma_-} v_h g \beta \cdot n$
        2. +outer boundary on the inflow (we replace $u_h$ by given $g$): $\int_{\Gamma_-} v_h g \beta \cdot n$
        3. -outer boundary on the outflow: $\int_{\Gamma_+} v_h u_h \beta \cdot n$
        4. +outer boundary on the outflow: $\int_{\Gamma_+} v_h u_h \beta \cdot n$
        5. -inner faces (integral from two sides turns into jump, we use the upwind velocity): $\int_F [v_h] u_h^{\text{upwind}} \beta \cdot n$
        6. +inner faces (integral from two sides turns into jump, we use the upwind velocity): $\int_F [v_h] u_h^{\text{upwind}} \beta \cdot n$
        -

        Here, the jump is defined as $[v] = v^+ - v^-$, where the superscripts refer to the left ('+') and right ('-') values at the face. The upwind value $u^{\text{upwind}}$ is defined to be $u^+$ if $\beta \cdot n>0$ and $u^-$ otherwise.

        +

        Here, the jump is defined as $[v] = v^+ - v^-$, where the superscripts refer to the left ('+') and right ('-') values at the face. The upwind value $u^{\text{upwind}}$ is defined to be $u^+$ if $\beta \cdot n>0$ and $u^-$ otherwise.

        As a result, the mesh-dependent weak form reads:

        -\[
+<picture><source srcset=\[
 \sum_{T\in \mathbb T_h} -\bigl(\nabla \phi_i,{\mathbf \beta}\cdot \phi_j \bigr)_T +
 \sum_{F\in\mathbb F_h^i} \bigl< [\phi_i], \phi_j^{upwind} \beta\cdot \mathbf n\bigr>_{F} +
 \bigl<\phi_i, \phi_j \beta\cdot \mathbf n\bigr>_{\Gamma_+}
 = -\bigl<\phi_i, g \beta\cdot\mathbf n\bigr>_{\Gamma_-}.
-\] +\]" src="form_2791.png"/>

        -

        Here, $\mathbb T_h$ is the set of all active cells of the triangulation and $\mathbb F_h^i$ is the set of all active interior faces. This formulation is known as the upwind discontinuous Galerkin method.

        +

        Here, $\mathbb T_h$ is the set of all active cells of the triangulation and $\mathbb F_h^i$ is the set of all active interior faces. This formulation is known as the upwind discontinuous Galerkin method.

        In order to implement this bilinear form, we need to compute the cell terms (first sum) using the usual way to achieve integration on a cell, the interface terms (second sum) using FEInterfaceValues, and the boundary terms (the other two terms). The summation of all those is done by MeshWorker::mesh_loop().

        The test problem

        -

        We solve the advection equation on $\Omega=[0,1]^2$ with ${\mathbf \beta}=\frac{1}{|x|}(-x_2, x_1)$ representing a circular counterclockwise flow field, and $g=1$ on ${\bf x}\in\Gamma_-^1 := [0,0.5]\times\{0\}$ and $g=0$ on ${\bf x}\in
-\Gamma_-\setminus \Gamma_-^1$.

        -

        We solve on a sequence of meshes by refining the mesh adaptively by estimating the norm of the gradient on each cell. After solving on each mesh, we output the solution in vtk format and compute the $L^\infty$ norm of the solution. As the exact solution is either 0 or 1, we can measure the magnitude of the overshoot of the numerical solution with this.

        +

        We solve the advection equation on $\Omega=[0,1]^2$ with ${\mathbf \beta}=\frac{1}{|x|}(-x_2, x_1)$ representing a circular counterclockwise flow field, and $g=1$ on ${\bf x}\in\Gamma_-^1 := [0,0.5]\times\{0\}$ and $g=0$ on ${\bf x}\in
+\Gamma_-\setminus \Gamma_-^1$.

        +

        We solve on a sequence of meshes by refining the mesh adaptively by estimating the norm of the gradient on each cell. After solving on each mesh, we output the solution in vtk format and compute the $L^\infty$ norm of the solution. As the exact solution is either 0 or 1, we can measure the magnitude of the overshoot of the numerical solution with this.

        The commented program

        The first few files have already been covered in previous examples and will thus not be further commented on:

          #include <deal.II/base/quadrature_lib.h>
        @@ -256,7 +256,7 @@
         
        #define AssertDimension(dim1, dim2)
        #define AssertIndexRange(index, range)
        -

        Finally, a function that computes and returns the wind field $\beta=\beta(\mathbf x)$. As explained in the introduction, we will use a rotational field around the origin in 2d. In 3d, we simply leave the $z$-component unset (i.e., at zero), whereas the function can not be used in 1d in its current implementation:

        +

      Finally, a function that computes and returns the wind field $\beta=\beta(\mathbf x)$. As explained in the introduction, we will use a rotational field around the origin in 2d. In 3d, we simply leave the $z$-component unset (i.e., at zero), whereas the function can not be used in 1d in its current implementation:

        template <int dim>
        Tensor<1, dim> beta(const Point<dim> &p)
        {
      @@ -623,7 +623,7 @@
        }
       
       
      -

      We refine the grid according to a very simple refinement criterion, namely an approximation to the gradient of the solution. As here we consider the DG(1) method (i.e. we use piecewise bilinear shape functions) we could simply compute the gradients on each cell. But we do not want to base our refinement indicator on the gradients on each cell only, but want to base them also on jumps of the discontinuous solution function over faces between neighboring cells. The simplest way of doing that is to compute approximative gradients by difference quotients including the cell under consideration and its neighbors. This is done by the DerivativeApproximation class that computes the approximate gradients in a way similar to the GradientEstimation described in step-9 of this tutorial. In fact, the DerivativeApproximation class was developed following the GradientEstimation class of step-9. Relating to the discussion in step-9, here we consider $h^{1+d/2}|\nabla_h u_h|$. Furthermore we note that we do not consider approximate second derivatives because solutions to the linear advection equation are in general not in $H^2$ but only in $H^1$ (or, to be more precise: in $H^1_\beta$, i.e., the space of functions whose derivatives in direction $\beta$ are square integrable).

      +

      We refine the grid according to a very simple refinement criterion, namely an approximation to the gradient of the solution. As here we consider the DG(1) method (i.e. we use piecewise bilinear shape functions) we could simply compute the gradients on each cell. But we do not want to base our refinement indicator on the gradients on each cell only, but want to base them also on jumps of the discontinuous solution function over faces between neighboring cells. The simplest way of doing that is to compute approximative gradients by difference quotients including the cell under consideration and its neighbors. This is done by the DerivativeApproximation class that computes the approximate gradients in a way similar to the GradientEstimation described in step-9 of this tutorial. In fact, the DerivativeApproximation class was developed following the GradientEstimation class of step-9. Relating to the discussion in step-9, here we consider $h^{1+d/2}|\nabla_h u_h|$. Furthermore we note that we do not consider approximate second derivatives because solutions to the linear advection equation are in general not in $H^2$ but only in $H^1$ (or, to be more precise: in $H^1_\beta$, i.e., the space of functions whose derivatives in direction $\beta$ are square integrable).

        template <int dim>
        void AdvectionProblem<dim>::refine_grid()
        {
      @@ -637,7 +637,7 @@
        gradient_indicator);
       
      void approximate_gradient(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const InputVector &solution, Vector< float > &derivative_norm, const unsigned int component=0)
      -

      and they are cell-wise scaled by the factor $h^{1+d/2}$

      +

      and they are cell-wise scaled by the factor $h^{1+d/2}$

        unsigned int cell_no = 0;
        for (const auto &cell : dof_handler.active_cell_iterators())
        gradient_indicator(cell_no++) *=
      @@ -820,11 +820,11 @@ 4   5   -

      In refinement iteration 5, the image can't be plotted in a reasonable way any more as a 3d plot. We thus show a color plot with a range of $[-1,2]$ (the solution values of the exact solution lie in $[0,1]$, of course). In any case, it is clear that the continuous Galerkin solution exhibits oscillatory behavior that gets worse and worse as the mesh is refined more and more.

      +

      In refinement iteration 5, the image can't be plotted in a reasonable way any more as a 3d plot. We thus show a color plot with a range of $[-1,2]$ (the solution values of the exact solution lie in $[0,1]$, of course). In any case, it is clear that the continuous Galerkin solution exhibits oscillatory behavior that gets worse and worse as the mesh is refined more and more.

      There are a number of strategies to stabilize the cG method, if one wants to use continuous elements for some reason. Discussing these methods is beyond the scope of this tutorial program; an interested reader could, for example, take a look at step-31.

      Possibilities for extensions

      -

      Given that the exact solution is known in this case, one interesting avenue for further extensions would be to confirm the order of convergence for this program. In the current case, the solution is non-smooth, and so we can not expect to get a particularly high order of convergence, even if we used higher order elements. But even if the solution is smooth, the equation is not elliptic and so it is not immediately clear that we should obtain a convergence order that equals that of the optimal interpolation estimates (i.e. for example that we would get $h^3$ convergence in the $L^2$ norm by using quadratic elements).

      -

      In fact, for hyperbolic equations, theoretical predictions often indicate that the best one can hope for is an order one half below the interpolation estimate. For example, for the streamline diffusion method (an alternative method to the DG method used here to stabilize the solution of the transport equation), one can prove that for elements of degree $p$, the order of convergence is $p+\frac 12$ on arbitrary meshes. While the observed order is frequently $p+1$ on uniformly refined meshes, one can construct so-called Peterson meshes on which the worse theoretical bound is actually attained. This should be relatively simple to verify, for example using the VectorTools::integrate_difference function.

      +

      Given that the exact solution is known in this case, one interesting avenue for further extensions would be to confirm the order of convergence for this program. In the current case, the solution is non-smooth, and so we can not expect to get a particularly high order of convergence, even if we used higher order elements. But even if the solution is smooth, the equation is not elliptic and so it is not immediately clear that we should obtain a convergence order that equals that of the optimal interpolation estimates (i.e. for example that we would get $h^3$ convergence in the $L^2$ norm by using quadratic elements).

      +

      In fact, for hyperbolic equations, theoretical predictions often indicate that the best one can hope for is an order one half below the interpolation estimate. For example, for the streamline diffusion method (an alternative method to the DG method used here to stabilize the solution of the transport equation), one can prove that for elements of degree $p$, the order of convergence is $p+\frac 12$ on arbitrary meshes. While the observed order is frequently $p+1$ on uniformly refined meshes, one can construct so-called Peterson meshes on which the worse theoretical bound is actually attained. This should be relatively simple to verify, for example using the VectorTools::integrate_difference function.

      A different direction is to observe that the solution of transport problems often has discontinuities and that therefore a mesh in which we bisect every cell in every coordinate direction may not be optimal. Rather, a better strategy would be to only cut cells in the direction parallel to the discontinuity. This is called anisotropic mesh refinement and is the subject of step-30.

      The plain program

      /* ---------------------------------------------------------------------
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_12b.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_12b.html 2024-03-17 21:57:43.747236176 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_12b.html 2024-03-17 21:57:43.751236201 +0000 @@ -197,7 +197,7 @@
       
      #define AssertDimension(dim1, dim2)
      #define AssertIndexRange(index, range)
      -

      Finally, a function that computes and returns the wind field $\beta=\beta(\mathbf x)$. As explained in the introduction, we will use a rotational field around the origin in 2d. In 3d, we simply leave the $z$-component unset (i.e., at zero), whereas the function can not be used in 1d in its current implementation:

      +

      Finally, a function that computes and returns the wind field $\beta=\beta(\mathbf x)$. As explained in the introduction, we will use a rotational field around the origin in 2d. In 3d, we simply leave the $z$-component unset (i.e., at zero), whereas the function can not be used in 1d in its current implementation:

        template <int dim>
        Tensor<1, dim> beta(const Point<dim> &p)
        {
      @@ -466,8 +466,8 @@
        fe_face_values.shape_value(i, point) *
        JxW[point];
       
      -

      We additionally assemble the term $(\beta\cdot n u,\hat
-   v)_{\partial \kappa_+}$,

      +

      We additionally assemble the term $(\beta\cdot n u,\hat
+   v)_{\partial \kappa_+}$,

        for (unsigned int k = 0; k < neighbor_dofs_per_cell; ++k)
        for (unsigned int j = 0; j < dofs_per_cell; ++j)
        u1_v2_matrix(k, j) +=
      @@ -487,8 +487,8 @@
        fe_face_values.shape_value(i, point) *
        JxW[point];
       
      -

      And this is another new one: $(\beta\cdot n \hat u,\hat
-   v)_{\partial \kappa_-}$:

      +

      And this is another new one: $(\beta\cdot n \hat u,\hat
+   v)_{\partial \kappa_-}$:

        for (unsigned int k = 0; k < neighbor_dofs_per_cell; ++k)
        for (unsigned int l = 0; l < neighbor_dofs_per_cell; ++l)
        u2_v2_matrix(k, l) +=
      @@ -523,7 +523,7 @@
        }
       
       
      -

      We refine the grid according to a very simple refinement criterion, namely an approximation to the gradient of the solution. As here we consider the DG(1) method (i.e. we use piecewise bilinear shape functions) we could simply compute the gradients on each cell. But we do not want to base our refinement indicator on the gradients on each cell only, but want to base them also on jumps of the discontinuous solution function over faces between neighboring cells. The simplest way of doing that is to compute approximative gradients by difference quotients including the cell under consideration and its neighbors. This is done by the DerivativeApproximation class that computes the approximate gradients in a way similar to the GradientEstimation described in step-9 of this tutorial. In fact, the DerivativeApproximation class was developed following the GradientEstimation class of step-9. Relating to the discussion in step-9, here we consider $h^{1+d/2}|\nabla_h u_h|$. Furthermore we note that we do not consider approximate second derivatives because solutions to the linear advection equation are in general not in $H^2$ but only in $H^1$ (or, to be more precise: in $H^1_\beta$, i.e., the space of functions whose derivatives in direction $\beta$ are square integrable).

      +

      We refine the grid according to a very simple refinement criterion, namely an approximation to the gradient of the solution. As here we consider the DG(1) method (i.e. we use piecewise bilinear shape functions) we could simply compute the gradients on each cell. But we do not want to base our refinement indicator on the gradients on each cell only, but want to base them also on jumps of the discontinuous solution function over faces between neighboring cells. The simplest way of doing that is to compute approximative gradients by difference quotients including the cell under consideration and its neighbors. This is done by the DerivativeApproximation class that computes the approximate gradients in a way similar to the GradientEstimation described in step-9 of this tutorial. In fact, the DerivativeApproximation class was developed following the GradientEstimation class of step-9. Relating to the discussion in step-9, here we consider $h^{1+d/2}|\nabla_h u_h|$. Furthermore we note that we do not consider approximate second derivatives because solutions to the linear advection equation are in general not in $H^2$ but only in $H^1$ (or, to be more precise: in $H^1_\beta$, i.e., the space of functions whose derivatives in direction $\beta$ are square integrable).

        template <int dim>
        void AdvectionProblem<dim>::refine_grid()
        {
      @@ -537,7 +537,7 @@
        gradient_indicator);
       
      void approximate_gradient(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const InputVector &solution, Vector< float > &derivative_norm, const unsigned int component=0)
      -

      and they are cell-wise scaled by the factor $h^{1+d/2}$

      +

      and they are cell-wise scaled by the factor $h^{1+d/2}$

        unsigned int cell_no = 0;
        for (const auto &cell : dof_handler.active_cell_iterators())
        gradient_indicator(cell_no++) *=
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 2024-03-17 21:57:43.887237041 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 2024-03-17 21:57:43.883237016 +0000 @@ -163,30 +163,30 @@

      The Heidelberg group of Professor Rolf Rannacher, to which the three initial authors of the deal.II library belonged during their PhD time and partly also afterwards, has been involved with adaptivity and error estimation for finite element discretizations since the mid-1990ies. The main achievement is the development of error estimates for arbitrary functionals of the solution, and of optimal mesh refinement for its computation.

      We will not discuss the derivation of these concepts in too great detail, but will implement the main ideas in the present example program. For a thorough introduction into the general idea, we refer to the seminal work of Becker and Rannacher [BR95], [BR96r], and the overview article of the same authors in Acta Numerica [BR01]; the first introduces the concept of error estimation and adaptivity for general functional output for the Laplace equation, while the second gives many examples of applications of these concepts to a large number of other, more complicated equations. For applications to individual types of equations, see also the publications by Becker [Bec95], [Bec98], Kanschat [Kan96], [FK97], Suttmeier [Sut96], [RS97], [RS98c], [RS99], Bangerth [BR99b], [Ban00w], [BR01a], [Ban02], and Hartmann [Har02], [HH01], [HH01b]. All of these works, from the original introduction by Becker and Rannacher to individual contributions to particular equations, have later been summarized in a book by Bangerth and Rannacher that covers all of these topics, see [BR03].

      The basic idea is the following: in applications, one is not usually interested in the solution per se, but rather in certain aspects of it. For example, in simulations of flow problems, one may want to know the lift or drag of a body immersed in the fluid; it is this quantity that we want to know to best accuracy, and whether the rest of the solution of the describing equations is well resolved is not of primary interest. Likewise, in elasticity one might want to know about values of the stress at certain points to guess whether maximal load values of joints are safe, for example. Or, in radiative transfer problems, mean flux intensities are of interest.

      -

      In all the cases just listed, it is the evaluation of a functional $J(u)$ of the solution which we are interested in, rather than the values of $u$ everywhere. Since the exact solution $u$ is not available, but only its numerical approximation $u_h$, it is sensible to ask whether the computed value $J(u_h)$ is within certain limits of the exact value $J(u)$, i.e. we want to bound the error with respect to this functional, $J(u)-J(u_h)$.

      -

      For simplicity of exposition, we henceforth assume that both the quantity of interest $J$ as well as the equation are linear, and we will in particular show the derivation for the Laplace equation with homogeneous Dirichlet boundary conditions, although the concept is much more general. For this general case, we refer to the references listed above. The goal is to obtain bounds on the error, $J(e)=J(u)-J(u_h)$. For this, let us denote by $z$ the solution of a dual problem, defined as follows:

      -\[
+<p>In all the cases just listed, it is the evaluation of a functional <picture><source srcset=$J(u)$ of the solution which we are interested in, rather than the values of $u$ everywhere. Since the exact solution $u$ is not available, but only its numerical approximation $u_h$, it is sensible to ask whether the computed value $J(u_h)$ is within certain limits of the exact value $J(u)$, i.e. we want to bound the error with respect to this functional, $J(u)-J(u_h)$.

      +

      For simplicity of exposition, we henceforth assume that both the quantity of interest $J$ as well as the equation are linear, and we will in particular show the derivation for the Laplace equation with homogeneous Dirichlet boundary conditions, although the concept is much more general. For this general case, we refer to the references listed above. The goal is to obtain bounds on the error, $J(e)=J(u)-J(u_h)$. For this, let us denote by $z$ the solution of a dual problem, defined as follows:

      +\[
   a(\varphi,z) = J(\varphi) \qquad \forall \varphi,
-\] +\]" src="form_2820.png"/>

      -

      where $a(\cdot,\cdot)$ is the bilinear form associated with the differential equation, and the test functions are chosen from the corresponding solution space. Then, taking as special test function $\varphi=e$ the error, we have that

      -\[
+<p> where <picture><source srcset=$a(\cdot,\cdot)$ is the bilinear form associated with the differential equation, and the test functions are chosen from the corresponding solution space. Then, taking as special test function $\varphi=e$ the error, we have that

      +\[
   J(e) = a(e,z)
-\] +\]" src="form_2823.png"/>

      and we can, by Galerkin orthogonality, rewrite this as

      -\[
+<picture><source srcset=\[
   J(e) = a(e,z-\varphi_h)
-\] +\]" src="form_2824.png"/>

      -

      where $\varphi_h$ can be chosen from the discrete test space in whatever way we find convenient.

      +

      where $\varphi_h$ can be chosen from the discrete test space in whatever way we find convenient.

      Concretely, for Laplace's equation, the error identity reads

      -\[
+<picture><source srcset=\[
   J(e) = (\nabla e, \nabla(z-\varphi_h)).
-\] +\]" src="form_2826.png"/>

      Because we want to use this formula not only to compute error, but also to refine the mesh, we need to rewrite the expression above as a sum over cells where each cell's contribution can then be used as an error indicator for this cell. Thus, we split the scalar products into terms for each cell, and integrate by parts on each of them:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (\nabla (u-u_h), \nabla (z-\varphi_h))_K
@@ -194,54 +194,54 @@
   &=&
   \sum_K (-\Delta (u-u_h), z-\varphi_h)_K
   + (\partial_n (u-u_h), z-z_h)_{\partial K}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2827.png"/>

      -

      Next we use that $-\Delta u=f$, and that the solution of the Laplace equation is smooth enough that $\partial_n u$ is continuous almost everywhere – so the terms involving $\partial_n u$ on one cell cancels with that on its neighbor, where the normal vector has the opposite sign. (The same is not true for $\partial_n u_h$, though.) At the boundary of the domain, where there is no neighbor cell with which this term could cancel, the weight $z-\varphi_h$ can be chosen as zero, and the whole term disappears.

      +

      Next we use that $-\Delta u=f$, and that the solution of the Laplace equation is smooth enough that $\partial_n u$ is continuous almost everywhere – so the terms involving $\partial_n u$ on one cell cancels with that on its neighbor, where the normal vector has the opposite sign. (The same is not true for $\partial_n u_h$, though.) At the boundary of the domain, where there is no neighbor cell with which this term could cancel, the weight $z-\varphi_h$ can be chosen as zero, and the whole term disappears.

      Thus, we have

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (f+\Delta u_h, z-\varphi_h)_K
   - (\partial_n u_h, z-\varphi_h)_{\partial K\backslash \partial\Omega}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2832.png"/>

      -

      In a final step, note that when taking the normal derivative of $u_h$, we mean the value of this quantity as taken from this side of the cell (for the usual Lagrange elements, derivatives are not continuous across edges). We then rewrite the above formula by exchanging half of the edge integral of cell $K$ with the neighbor cell $K'$, to obtain

      -\begin{eqnarray*}
+<p> In a final step, note that when taking the normal derivative of <picture><source srcset=$u_h$, we mean the value of this quantity as taken from this side of the cell (for the usual Lagrange elements, derivatives are not continuous across edges). We then rewrite the above formula by exchanging half of the edge integral of cell $K$ with the neighbor cell $K'$, to obtain

      +\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (f+\Delta u_h, z-\varphi_h)_K
   - \frac 12 (\partial_n u_h|_K + \partial_{n'} u_h|_{K'},
               z-\varphi_h)_{\partial K\backslash \partial\Omega}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2833.png"/>

      -

      Using that for the normal vectors on adjacent cells we have $n'=-n$, we define the jump of the normal derivative by

      -\[
+<p> Using that for the normal vectors on adjacent cells we have <picture><source srcset=$n'=-n$, we define the jump of the normal derivative by

      +\[
   [\partial_n u_h] \dealcoloneq \partial_n u_h|_K + \partial_{n'} u_h|_{K'}
   =
   \partial_n u_h|_K - \partial_n u_h|_{K'},
-\] +\]" src="form_2835.png"/>

      -

      and get the final form after setting the discrete function $\varphi_h$, which is by now still arbitrary, to the point interpolation of the dual solution, $\varphi_h=I_h z$:

      -\begin{eqnarray*}
+<p> and get the final form after setting the discrete function <picture><source srcset=$\varphi_h$, which is by now still arbitrary, to the point interpolation of the dual solution, $\varphi_h=I_h z$:

      +\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (f+\Delta u_h, z-I_h z)_K
   - \frac 12 ([\partial_n u_h],
               z-I_h z)_{\partial K\backslash \partial\Omega}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2837.png"/>

      -

      With this, we have obtained an exact representation of the error of the finite element discretization with respect to arbitrary (linear) functionals $J(\cdot)$. Its structure is a weighted form of a residual estimator, as both $f+\Delta u_h$ and $[\partial_n u_h]$ are cell and edge residuals that vanish on the exact solution, and $z-I_h z$ are weights indicating how important the residual on a certain cell is for the evaluation of the given functional. Furthermore, it is a cell-wise quantity, so we can use it as a mesh refinement criterion. The question is: how to evaluate it? After all, the evaluation requires knowledge of the dual solution $z$, which carries the information about the quantity we want to know to best accuracy.

      -

      In some, very special cases, this dual solution is known. For example, if the functional $J(\cdot)$ is the point evaluation, $J(\varphi)=\varphi(x_0)$, then the dual solution has to satisfy

      -\[
+<p>With this, we have obtained an exact representation of the error of the finite element discretization with respect to arbitrary (linear) functionals <picture><source srcset=$J(\cdot)$. Its structure is a weighted form of a residual estimator, as both $f+\Delta u_h$ and $[\partial_n u_h]$ are cell and edge residuals that vanish on the exact solution, and $z-I_h z$ are weights indicating how important the residual on a certain cell is for the evaluation of the given functional. Furthermore, it is a cell-wise quantity, so we can use it as a mesh refinement criterion. The question is: how to evaluate it? After all, the evaluation requires knowledge of the dual solution $z$, which carries the information about the quantity we want to know to best accuracy.

      +

      In some, very special cases, this dual solution is known. For example, if the functional $J(\cdot)$ is the point evaluation, $J(\varphi)=\varphi(x_0)$, then the dual solution has to satisfy

      +\[
   -\Delta z = \delta(x-x_0),
-\] +\]" src="form_2843.png"/>

      with the Dirac delta function on the right hand side, and the dual solution is the Green's function with respect to the point $x_0$. For simple geometries, this function is analytically known, and we could insert it into the error representation formula.

      -

      However, we do not want to restrict ourselves to such special cases. Rather, we will compute the dual solution numerically, and approximate $z$ by some numerically obtained $\tilde z$. We note that it is not sufficient to compute this approximation $\tilde z$ using the same method as used for the primal solution $u_h$, since then $\tilde z-I_h \tilde z=0$, and the overall error estimate would be zero. Rather, the approximation $\tilde z$ has to be from a larger space than the primal finite element space. There are various ways to obtain such an approximation (see the cited literature), and we will choose to compute it with a higher order finite element space. While this is certainly not the most efficient way, it is simple since we already have all we need to do that in place, and it also allows for simple experimenting. For more efficient methods, again refer to the given literature, in particular [BR95], [BR03].

      +

      However, we do not want to restrict ourselves to such special cases. Rather, we will compute the dual solution numerically, and approximate $z$ by some numerically obtained $\tilde z$. We note that it is not sufficient to compute this approximation $\tilde z$ using the same method as used for the primal solution $u_h$, since then $\tilde z-I_h \tilde z=0$, and the overall error estimate would be zero. Rather, the approximation $\tilde z$ has to be from a larger space than the primal finite element space. There are various ways to obtain such an approximation (see the cited literature), and we will choose to compute it with a higher order finite element space. While this is certainly not the most efficient way, it is simple since we already have all we need to do that in place, and it also allows for simple experimenting. For more efficient methods, again refer to the given literature, in particular [BR95], [BR03].

      With this, we end the discussion of the mathematical side of this program and turn to the actual implementation.

      -
      Note
      There are two steps above that do not seem necessary if all you care about is computing the error: namely, (i) the subtraction of $\phi_h$ from $z$, and (ii) splitting the integral into a sum of cells and integrating by parts on each. Indeed, neither of these two steps change $J(e)$ at all, as we only ever consider identities above until the substitution of $z$ by $\tilde z$. In other words, if you care only about estimating the global error $J(e)$, then these steps are not necessary. On the other hand, if you want to use the error estimate also as a refinement criterion for each cell of the mesh, then it is necessary to (i) break the estimate into a sum of cells, and (ii) massage the formulas in such a way that each cell's contributions have something to do with the local error. (While the contortions above do not change the value of the sum $J(e)$, they change the values we compute for each cell $K$.) To this end, we want to write everything in the form "residual times dual weight" where a "residual" is something that goes to zero as the approximation becomes $u_h$ better and better. For example, the quantity $\partial_n
-u_h$ is not a residual, since it simply converges to the (normal component of) the gradient of the exact solution. On the other hand, $[\partial_n u_h]$ is a residual because it converges to $[\partial_n
-u]=0$. All of the steps we have taken above in developing the final form of $J(e)$ have indeed had the goal of bringing the final formula into a form where each term converges to zero as the discrete solution $u_h$ converges to $u$. This then allows considering each cell's contribution as an "error indicator" that also converges to zero – as it should as the mesh is refined.
      +
      Note
      There are two steps above that do not seem necessary if all you care about is computing the error: namely, (i) the subtraction of $\phi_h$ from $z$, and (ii) splitting the integral into a sum of cells and integrating by parts on each. Indeed, neither of these two steps change $J(e)$ at all, as we only ever consider identities above until the substitution of $z$ by $\tilde z$. In other words, if you care only about estimating the global error $J(e)$, then these steps are not necessary. On the other hand, if you want to use the error estimate also as a refinement criterion for each cell of the mesh, then it is necessary to (i) break the estimate into a sum of cells, and (ii) massage the formulas in such a way that each cell's contributions have something to do with the local error. (While the contortions above do not change the value of the sum $J(e)$, they change the values we compute for each cell $K$.) To this end, we want to write everything in the form "residual times dual weight" where a "residual" is something that goes to zero as the approximation becomes $u_h$ better and better. For example, the quantity $\partial_n
+u_h$ is not a residual, since it simply converges to the (normal component of) the gradient of the exact solution. On the other hand, $[\partial_n u_h]$ is a residual because it converges to $[\partial_n
+u]=0$. All of the steps we have taken above in developing the final form of $J(e)$ have indeed had the goal of bringing the final formula into a form where each term converges to zero as the discrete solution $u_h$ converges to $u$. This then allows considering each cell's contribution as an "error indicator" that also converges to zero – as it should as the mesh is refined.

      The software

      The step-14 example program builds heavily on the techniques already used in the step-13 program. Its implementation of the dual weighted residual error estimator explained above is done by deriving a second class, properly called DualSolver, from the Solver base class, and having a class (WeightedResidual) that joins the two again and controls the solution of the primal and dual problem, and then uses both to compute the error indicator for mesh refinement.

      The program continues the modular concept of the previous example, by implementing the dual functional, describing quantity of interest, by an abstract base class, and providing two different functionals which implement this interface. Adding a different quantity of interest is thus simple.

      @@ -2576,15 +2576,15 @@

      Note the subtle interplay between resolving the corner singularities, and resolving around the point of evaluation. It will be rather difficult to generate such a mesh by hand, as this would involve judging quantitatively how much each of the four corner singularities should be resolved, and setting the weight compared to the vicinity of the evaluation point.

      The program prints the point value and the estimated error in this quantity. From extrapolating it, we can guess that the exact value is somewhere close to 0.0334473, plus or minus 0.0000001 (note that we get almost 6 valid digits from only 22,000 (primal) degrees of freedom). This number cannot be obtained from the value of the functional alone, but I have used the assumption that the error estimator is mostly exact, and extrapolated the computed value plus the estimated error, to get an approximation of the true value. Computing with more degrees of freedom shows that this assumption is indeed valid.

      -

      From the computed results, we can generate two graphs: one that shows the convergence of the error $J(u)-J(u_h)$ (taking the extrapolated value as correct) in the point value, and the value that we get by adding up the computed value $J(u_h)$ and estimated error $\eta$ (if the error estimator $\eta$ were exact, then the value $J(u_h)+\eta$ would equal the exact point value, and the error in this quantity would always be zero; however, since the error estimator is only a - good - approximation to the true error, we can by this only reduce the size of the error). In this graph, we also indicate the complexity ${\cal O}(1/N)$ to show that mesh refinement acts optimally in this case. The second chart compares true and estimated error, and shows that the two are actually very close to each other, even for such a complicated quantity as the point value:

      +

      From the computed results, we can generate two graphs: one that shows the convergence of the error $J(u)-J(u_h)$ (taking the extrapolated value as correct) in the point value, and the value that we get by adding up the computed value $J(u_h)$ and estimated error $\eta$ (if the error estimator $\eta$ were exact, then the value $J(u_h)+\eta$ would equal the exact point value, and the error in this quantity would always be zero; however, since the error estimator is only a - good - approximation to the true error, we can by this only reduce the size of the error). In this graph, we also indicate the complexity ${\cal O}(1/N)$ to show that mesh refinement acts optimally in this case. The second chart compares true and estimated error, and shows that the two are actually very close to each other, even for such a complicated quantity as the point value:

      Comparing refinement criteria

      -

      Since we have accepted quite some effort when using the mesh refinement driven by the dual weighted error estimator (for solving the dual problem, and for evaluating the error representation), it is worthwhile asking whether that effort was successful. To this end, we first compare the achieved error levels for different mesh refinement criteria. To generate this data, simply change the value of the mesh refinement criterion variable in the main program. The results are thus (for the weight in the Kelly indicator, we have chosen the function $1/(r^2+0.1^2)$, where $r$ is the distance to the evaluation point; it can be shown that this is the optimal weight if we neglect the effects of boundaries):

      +

      Since we have accepted quite some effort when using the mesh refinement driven by the dual weighted error estimator (for solving the dual problem, and for evaluating the error representation), it is worthwhile asking whether that effort was successful. To this end, we first compare the achieved error levels for different mesh refinement criteria. To generate this data, simply change the value of the mesh refinement criterion variable in the main program. The results are thus (for the weight in the Kelly indicator, we have chosen the function $1/(r^2+0.1^2)$, where $r$ is the distance to the evaluation point; it can be shown that this is the optimal weight if we neglect the effects of boundaries):

      -

      Checking these numbers, we see that for global refinement, the error is proportional to $O(1/(\sqrt{N}\log N))$, and for the dual estimator $O(1/N)$. Generally speaking, we see that the dual weighted error estimator is better than the other refinement indicators, at least when compared with those that have a similarly regular behavior. The Kelly indicator produces smaller errors, but jumps about the picture rather irregularly, with the error also changing signs sometimes. Therefore, its behavior does not allow us to extrapolate the results to larger values of N. Furthermore, if we trust the error estimates of the dual weighted error estimator, the results can be improved by adding the estimated error to the computed values. In terms of reliability, the weighted estimator is thus better than the Kelly indicator, although the latter sometimes produces smaller errors.

      +

      Checking these numbers, we see that for global refinement, the error is proportional to $O(1/(\sqrt{N}\log N))$, and for the dual estimator $O(1/N)$. Generally speaking, we see that the dual weighted error estimator is better than the other refinement indicators, at least when compared with those that have a similarly regular behavior. The Kelly indicator produces smaller errors, but jumps about the picture rather irregularly, with the error also changing signs sometimes. Therefore, its behavior does not allow us to extrapolate the results to larger values of N. Furthermore, if we trust the error estimates of the dual weighted error estimator, the results can be improved by adding the estimated error to the computed values. In terms of reliability, the weighted estimator is thus better than the Kelly indicator, although the latter sometimes produces smaller errors.

      Evaluation of point stresses

      Besides evaluating the values of the solution at a certain point, the program also offers the possibility to evaluate the x-derivatives at a certain point, and also to tailor mesh refinement for this. To let the program compute these quantities, simply replace the two occurrences of PointValueEvaluation in the main function by PointXDerivativeEvaluation, and let the program run:

      Refinement cycle: 0
      Number of degrees of freedom=72
      @@ -2636,16 +2636,16 @@ -

      Note the asymmetry of the grids compared with those we obtained for the point evaluation. This is due to the fact that the domain and the primal solution may be symmetric about the diagonal, but the $x$-derivative is not, and the latter enters the refinement criterion.

      -

      Then, it is interesting to compare the actually computed values of the quantity of interest (i.e. the x-derivative of the solution at one point) with a reference value of -0.0528223... plus or minus 0.0000005. We get this reference value by computing on a finer grid after some more mesh refinements, with approximately 130,000 cells. Recall that if the error is $O(1/N)$ in the optimal case, then taking a mesh with ten times more cells gives us one additional digit in the result.

      +

      Note the asymmetry of the grids compared with those we obtained for the point evaluation. This is due to the fact that the domain and the primal solution may be symmetric about the diagonal, but the $x$-derivative is not, and the latter enters the refinement criterion.

      +

      Then, it is interesting to compare the actually computed values of the quantity of interest (i.e. the x-derivative of the solution at one point) with a reference value of -0.0528223... plus or minus 0.0000005. We get this reference value by computing on a finer grid after some more mesh refinements, with approximately 130,000 cells. Recall that if the error is $O(1/N)$ in the optimal case, then taking a mesh with ten times more cells gives us one additional digit in the result.

      In the left part of the following chart, you again see the convergence of the error towards this extrapolated value, while on the right you see a comparison of true and estimated error:

      -

      After an initial phase where the true error changes its sign, the estimated error matches it quite well, again. Also note the dramatic improvement in the error when using the estimated error to correct the computed value of $J(u_h)$.

      +

      After an initial phase where the true error changes its sign, the estimated error matches it quite well, again. Also note the dramatic improvement in the error when using the estimated error to correct the computed value of $J(u_h)$.

      step-13 revisited

      -

      If instead of the Exercise_2_3 data set, we choose CurvedRidges in the main function, and choose $(0.5,0.5)$ as the evaluation point, then we can redo the computations of the previous example program, to compare whether the results obtained with the help of the dual weighted error estimator are better than those we had previously.

      +

      If instead of the Exercise_2_3 data set, we choose CurvedRidges in the main function, and choose $(0.5,0.5)$ as the evaluation point, then we can redo the computations of the previous example program, to compare whether the results obtained with the help of the dual weighted error estimator are better than those we had previously.

      First, the meshes after 9 adaptive refinement cycles obtained with the point evaluation and derivative evaluation refinement criteria, respectively, look like this:

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 2024-03-17 21:57:43.951237436 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 2024-03-17 21:57:43.951237436 +0000 @@ -144,41 +144,41 @@

      Introduction

      Foreword

      -

      This program deals with an example of a non-linear elliptic partial differential equation, the minimal surface equation. You can imagine the solution of this equation to describe the surface spanned by a soap film that is enclosed by a closed wire loop. We imagine the wire to not just be a planar loop, but in fact curved. The surface tension of the soap film will then reduce the surface to have minimal surface. The solution of the minimal surface equation describes this shape with the wire's vertical displacement as a boundary condition. For simplicity, we will here assume that the surface can be written as a graph $u=u(x,y)$ although it is clear that it is not very hard to construct cases where the wire is bent in such a way that the surface can only locally be constructed as a graph but not globally.

      +

      This program deals with an example of a non-linear elliptic partial differential equation, the minimal surface equation. You can imagine the solution of this equation to describe the surface spanned by a soap film that is enclosed by a closed wire loop. We imagine the wire to not just be a planar loop, but in fact curved. The surface tension of the soap film will then reduce the surface to have minimal surface. The solution of the minimal surface equation describes this shape with the wire's vertical displacement as a boundary condition. For simplicity, we will here assume that the surface can be written as a graph $u=u(x,y)$ although it is clear that it is not very hard to construct cases where the wire is bent in such a way that the surface can only locally be constructed as a graph but not globally.

      Because the equation is non-linear, we can't solve it directly. Rather, we have to use Newton's method to compute the solution iteratively.

      Note
      The material presented here is also discussed in video lecture 31.5, video lecture 31.55, video lecture 31.6. (All video lectures are also available here.) (See also video lecture 31.65, video lecture 31.7.)

      Classical formulation

      In a classical sense, the problem is given in the following form:

      -\begin{align*}
+<picture><source srcset=\begin{align*}
     -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right) &= 0 \qquad
     \qquad &&\textrm{in} ~ \Omega
     \\
     u&=g \qquad\qquad &&\textrm{on} ~ \partial \Omega.
-  \end{align*} + \end{align*}" src="form_2858.png"/>

      -

      $\Omega$ is the domain we get by projecting the wire's positions into $x-y$ space. In this example, we choose $\Omega$ as the unit disk.

      -

      As described above, we solve this equation using Newton's method in which we compute the $n$th approximate solution from the $(n-1)$th one, and use a damping parameter $\alpha^n$ to get better global convergence behavior:

      -\begin{align*}
+<p><picture><source srcset=$\Omega$ is the domain we get by projecting the wire's positions into $x-y$ space. In this example, we choose $\Omega$ as the unit disk.

      +

      As described above, we solve this equation using Newton's method in which we compute the $n$th approximate solution from the $(n-1)$th one, and use a damping parameter $\alpha^n$ to get better global convergence behavior:

      +\begin{align*}
     F'(u^{n},\delta u^{n})&=- F(u^{n})
     \\
     u^{n+1}&=u^{n}+\alpha^n \delta u^{n}
-  \end{align*} + \end{align*}" src="form_2862.png"/>

      with

      -\[
+<picture><source srcset=\[
     F(u) \dealcoloneq -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right)
-  \] + \]" src="form_2863.png"/>

      -

      and $F'(u,\delta u)$ the derivative of F in direction of $\delta u$:

      -\[
+<p> and <picture><source srcset=$F'(u,\delta u)$ the derivative of F in direction of $\delta u$:

      +\[
   F'(u,\delta u)=\lim \limits_{\epsilon \rightarrow 0}{\frac{F(u+\epsilon \delta u)-
   F(u)}{\epsilon}}.
-\] +\]" src="form_2866.png"/>

      -

      Going through the motions to find out what $F'(u,\delta u)$ is, we find that we have to solve a linear elliptic PDE in every Newton step, with $\delta u^n$ as the solution of:

      +

      Going through the motions to find out what $F'(u,\delta u)$ is, we find that we have to solve a linear elliptic PDE in every Newton step, with $\delta u^n$ as the solution of:

      -\[
+<picture><source srcset=\[
   - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}\nabla
   \delta u^{n} \right) +
   \nabla \cdot \left( \frac{\nabla u^{n} \cdot
@@ -186,62 +186,62 @@
   \right)  =
   -\left( - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}
   \nabla u^{n} \right) \right)
-  \] + \]" src="form_2868.png"/>

      -

      In order to solve the minimal surface equation, we have to solve this equation repeatedly, once per Newton step. To solve this, we have to take a look at the boundary condition of this problem. Assuming that $u^{n}$ already has the right boundary values, the Newton update $\delta u^{n}$ should have zero boundary conditions, in order to have the right boundary condition after adding both. In the first Newton step, we are starting with the solution $u^{0}\equiv 0$, the Newton update still has to deliver the right boundary condition to the solution $u^{1}$.

      -

      Summing up, we have to solve the PDE above with the boundary condition $\delta
-u^{0}=g$ in the first step and with $\delta u^{n}=0$ in all the following steps.

      -
      Note
      In some sense, one may argue that if the program already implements $F(u)$, it is duplicative to also have to implement $F'(u,\delta)$. As always, duplication tempts bugs and we would like to avoid it. While we do not explore this issue in this program, we will come back to it at the end of the Possibilities for extensions section below, and specifically in step-72.
      +

      In order to solve the minimal surface equation, we have to solve this equation repeatedly, once per Newton step. To solve this, we have to take a look at the boundary condition of this problem. Assuming that $u^{n}$ already has the right boundary values, the Newton update $\delta u^{n}$ should have zero boundary conditions, in order to have the right boundary condition after adding both. In the first Newton step, we are starting with the solution $u^{0}\equiv 0$, the Newton update still has to deliver the right boundary condition to the solution $u^{1}$.

      +

      Summing up, we have to solve the PDE above with the boundary condition $\delta
+u^{0}=g$ in the first step and with $\delta u^{n}=0$ in all the following steps.

      +
      Note
      In some sense, one may argue that if the program already implements $F(u)$, it is duplicative to also have to implement $F'(u,\delta)$. As always, duplication tempts bugs and we would like to avoid it. While we do not explore this issue in this program, we will come back to it at the end of the Possibilities for extensions section below, and specifically in step-72.

      Weak formulation of the problem

      -

      Starting with the strong formulation above, we get the weak formulation by multiplying both sides of the PDE with a test function $\varphi$ and integrating by parts on both sides:

      -\[
+<p>Starting with the strong formulation above, we get the weak formulation by multiplying both sides of the PDE with a test function <picture><source srcset=$\varphi$ and integrating by parts on both sides:

      +\[
   \left( \nabla \varphi , \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}\nabla
   \delta u^{n} \right)-\left(\nabla \varphi ,\frac{\nabla u^{n} \cdot \nabla
   \delta u^{n}}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{3}{2}}}\nabla u^{n}  \right)
   = -\left(\nabla \varphi , \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}} \nabla u^{n}
    \right).
-  \] + \]" src="form_2876.png"/>

      -

      Here the solution $\delta u^{n}$ is a function in $H^{1}(\Omega)$, subject to the boundary conditions discussed above. Reducing this space to a finite dimensional space with basis $\left\{
-\varphi_{0},\dots , \varphi_{N-1}\right\}$, we can write the solution:

      +

      Here the solution $\delta u^{n}$ is a function in $H^{1}(\Omega)$, subject to the boundary conditions discussed above. Reducing this space to a finite dimensional space with basis $\left\{
+\varphi_{0},\dots , \varphi_{N-1}\right\}$, we can write the solution:

      -\[
+<picture><source srcset=\[
   \delta u^{n}=\sum_{j=0}^{N-1} \delta U_{j} \varphi_{j}.
-\] +\]" src="form_2879.png"/>

      -

      Using the basis functions as test functions and defining $a_{n} \dealcoloneq \frac{1}
-{\sqrt{1+|\nabla u^{n}|^{2}}}$, we can rewrite the weak formulation:

      +

      Using the basis functions as test functions and defining $a_{n} \dealcoloneq \frac{1}
+{\sqrt{1+|\nabla u^{n}|^{2}}}$, we can rewrite the weak formulation:

      -\[
+<picture><source srcset=\[
   \sum_{j=0}^{N-1}\left[ \left( \nabla \varphi_{i} , a_{n} \nabla \varphi_{j} \right) -
   \left(\nabla u^{n}\cdot \nabla \varphi_{i} , a_{n}^{3} \nabla u^{n} \cdot \nabla
   \varphi_{j} \right) \right] \cdot \delta U_{j}=-\left( \nabla \varphi_{i} , a_{n}
   \nabla u^{n}\right) \qquad \forall i=0,\dots ,N-1,
-\] +\]" src="form_2881.png"/>

      -

      where the solution $\delta u^{n}$ is given by the coefficients $\delta U^{n}_{j}$. This linear system of equations can be rewritten as:

      +

      where the solution $\delta u^{n}$ is given by the coefficients $\delta U^{n}_{j}$. This linear system of equations can be rewritten as:

      -\[
+<picture><source srcset=\[
   A^{n}\; \delta U^{n}=b^{n},
-\] +\]" src="form_2883.png"/>

      -

      where the entries of the matrix $A^{n}$ are given by:

      +

      where the entries of the matrix $A^{n}$ are given by:

      -\[
+<picture><source srcset=\[
   A^{n}_{ij} \dealcoloneq \left( \nabla \varphi_{i} , a_{n} \nabla \varphi_{j} \right) -
   \left(\nabla u^{n}\cdot \nabla \varphi_{i} , a_{n}^{3} \nabla u^{n} \cdot \nabla
   \varphi_{j} \right),
-\] +\]" src="form_2885.png"/>

      -

      and the right hand side $b^{n}$ is given by:

      +

      and the right hand side $b^{n}$ is given by:

      -\[
+<picture><source srcset=\[
   b^{n}_{i} \dealcoloneq -\left( \nabla \varphi_{i} , a_{n} \nabla u^{n}\right).
-\] +\]" src="form_2887.png"/>

      Questions about the appropriate solver

      The matrix that corresponds to the Newton step above can be reformulated to show its structure a bit better. Rewriting it slightly, we get that it has the form

      -\[
+<picture><source srcset=\[
   A_{ij}
   =
   \left(
@@ -249,10 +249,10 @@
     B
     \nabla \varphi_j
   \right),
-\] +\]" src="form_2888.png"/>

      -

      where the matrix $B$ (of size $d \times d$ in $d$ space dimensions) is given by the following expression:

      -\[
+<p> where the matrix <picture><source srcset=$B$ (of size $d \times d$ in $d$ space dimensions) is given by the following expression:

      +\[
   B
   =
   a_n \left\{
@@ -267,44 +267,44 @@
   \frac{\nabla u_n}{\sqrt{1+|\nabla u^{n}|^{2}}} \otimes
   \frac{\nabla u_n}{\sqrt{1+|\nabla u^{n}|^{2}}}
   \right\}.
-\] +\]" src="form_2890.png"/>

      -

      From this expression, it is obvious that $B$ is symmetric, and so $A$ is symmetric as well. On the other hand, $B$ is also positive definite, which confers the same property onto $A$. This can be seen by noting that the vector $v_1 =
-\frac{\nabla u^n}{|\nabla u^n|}$ is an eigenvector of $B$ with eigenvalue $\lambda_1=a_n \left(1-\frac{|\nabla u^n|^2}{1+|\nabla u^n|^2}\right) > 0$ while all vectors $v_2\ldots v_d$ that are perpendicular to $v_1$ and each other are eigenvectors with eigenvalue $a_n$. Since all eigenvalues are positive, $B$ is positive definite and so is $A$. We can thus use the CG method for solving the Newton steps. (The fact that the matrix $A$ is symmetric and positive definite should not come as a surprise. It results from taking the derivative of an operator that results from taking the derivative of an energy functional: the minimal surface equation simply minimizes some non-quadratic energy. Consequently, the Newton matrix, as the matrix of second derivatives of a scalar energy, must be symmetric since the derivative with regard to the $i$th and $j$th degree of freedom should clearly commute. Likewise, if the energy functional is convex, then the matrix of second derivatives must be positive definite, and the direct calculation above simply reaffirms this.)

      -

      It is worth noting, however, that the positive definiteness degenerates for problems where $\nabla u$ becomes large. In other words, if we simply multiply all boundary values by 2, then to first order $u$ and $\nabla u$ will also be multiplied by two, but as a consequence the smallest eigenvalue of $B$ will become smaller and the matrix will become more ill-conditioned. (More specifically, for $|\nabla u^n|\rightarrow\infty$ we have that $\lambda_1 \propto a_n \frac{1}{|\nabla u^n|^2}$ whereas $\lambda_2\ldots \lambda_d=a_n$; thus, the condition number of $B$, which is a multiplicative factor in the condition number of $A$ grows like ${\cal O}(|\nabla u^n|^2)$.) It is simple to verify with the current program that indeed multiplying the boundary values used in the current program by larger and larger values results in a problem that will ultimately no longer be solvable using the simple preconditioned CG method we use here.

      +

      From this expression, it is obvious that $B$ is symmetric, and so $A$ is symmetric as well. On the other hand, $B$ is also positive definite, which confers the same property onto $A$. This can be seen by noting that the vector $v_1 =
+\frac{\nabla u^n}{|\nabla u^n|}$ is an eigenvector of $B$ with eigenvalue $\lambda_1=a_n \left(1-\frac{|\nabla u^n|^2}{1+|\nabla u^n|^2}\right) > 0$ while all vectors $v_2\ldots v_d$ that are perpendicular to $v_1$ and each other are eigenvectors with eigenvalue $a_n$. Since all eigenvalues are positive, $B$ is positive definite and so is $A$. We can thus use the CG method for solving the Newton steps. (The fact that the matrix $A$ is symmetric and positive definite should not come as a surprise. It results from taking the derivative of an operator that results from taking the derivative of an energy functional: the minimal surface equation simply minimizes some non-quadratic energy. Consequently, the Newton matrix, as the matrix of second derivatives of a scalar energy, must be symmetric since the derivative with regard to the $i$th and $j$th degree of freedom should clearly commute. Likewise, if the energy functional is convex, then the matrix of second derivatives must be positive definite, and the direct calculation above simply reaffirms this.)

      +

      It is worth noting, however, that the positive definiteness degenerates for problems where $\nabla u$ becomes large. In other words, if we simply multiply all boundary values by 2, then to first order $u$ and $\nabla u$ will also be multiplied by two, but as a consequence the smallest eigenvalue of $B$ will become smaller and the matrix will become more ill-conditioned. (More specifically, for $|\nabla u^n|\rightarrow\infty$ we have that $\lambda_1 \propto a_n \frac{1}{|\nabla u^n|^2}$ whereas $\lambda_2\ldots \lambda_d=a_n$; thus, the condition number of $B$, which is a multiplicative factor in the condition number of $A$ grows like ${\cal O}(|\nabla u^n|^2)$.) It is simple to verify with the current program that indeed multiplying the boundary values used in the current program by larger and larger values results in a problem that will ultimately no longer be solvable using the simple preconditioned CG method we use here.

      Choice of step length and globalization

      -

      As stated above, Newton's method works by computing a direction $\delta u^n$ and then performing the update $u^{n+1} = u^{n}+\alpha^n
-\delta u^{n}$ with a step length $0 < \alpha^n \le 1$. It is a common observation that for strongly nonlinear models, Newton's method does not converge if we always choose $\alpha^n=1$ unless one starts with an initial guess $u^0$ that is sufficiently close to the solution $u$ of the nonlinear problem. In practice, we don't always have such an initial guess, and consequently taking full Newton steps (i.e., using $\alpha=1$) does frequently not work.

      -

      A common strategy therefore is to use a smaller step length for the first few steps while the iterate $u^n$ is still far away from the solution $u$ and as we get closer use larger values for $\alpha^n$ until we can finally start to use full steps $\alpha^n=1$ as we are close enough to the solution. The question is of course how to choose $\alpha^n$. There are basically two widely used approaches: line search and trust region methods.

      +

      As stated above, Newton's method works by computing a direction $\delta u^n$ and then performing the update $u^{n+1} = u^{n}+\alpha^n
+\delta u^{n}$ with a step length $0 < \alpha^n \le 1$. It is a common observation that for strongly nonlinear models, Newton's method does not converge if we always choose $\alpha^n=1$ unless one starts with an initial guess $u^0$ that is sufficiently close to the solution $u$ of the nonlinear problem. In practice, we don't always have such an initial guess, and consequently taking full Newton steps (i.e., using $\alpha=1$) does frequently not work.

      +

      A common strategy therefore is to use a smaller step length for the first few steps while the iterate $u^n$ is still far away from the solution $u$ and as we get closer use larger values for $\alpha^n$ until we can finally start to use full steps $\alpha^n=1$ as we are close enough to the solution. The question is of course how to choose $\alpha^n$. There are basically two widely used approaches: line search and trust region methods.

      In this program, we simply always choose the step length equal to 0.1. This makes sure that for the testcase at hand we do get convergence although it is clear that by not eventually reverting to full step lengths we forego the rapid, quadratic convergence that makes Newton's method so appealing. Obviously, this is a point one eventually has to address if the program was made into one that is meant to solve more realistic problems. We will comment on this issue some more in the results section, and use an even better approach in step-77.

      Summary of the algorithm and testcase

      Overall, the program we have here is not unlike step-6 in many regards. The layout of the main class is essentially the same. On the other hand, the driving algorithm in the run() function is different and works as follows:

      1. -

        Start with the function $u^{0}\equiv 0$ and modify it in such a way that the values of $u^0$ along the boundary equal the correct boundary values $g$ (this happens in MinimalSurfaceProblem::set_boundary_values). Set $n=0$.

        +

        Start with the function $u^{0}\equiv 0$ and modify it in such a way that the values of $u^0$ along the boundary equal the correct boundary values $g$ (this happens in MinimalSurfaceProblem::set_boundary_values). Set $n=0$.

      2. -

        Compute the Newton update by solving the system $A^{n}\;\delta
-  U^{n}=b^{n}$ with boundary condition $\delta u^{n}=0$ on $\partial \Omega$.

        +

        Compute the Newton update by solving the system $A^{n}\;\delta
+  U^{n}=b^{n}$ with boundary condition $\delta u^{n}=0$ on $\partial \Omega$.

      3. -

        Compute a step length $\alpha^n$. In this program, we always set $\alpha^n=0.1$. To make things easier to extend later on, this happens in a function of its own, namely in MinimalSurfaceProblem::determine_step_length. (The strategy of always choosing $\alpha^n=0.1$ is of course not optimal – we should choose a step length that works for a given search direction – but it requires a bit of work to do that. In the end, we leave these sorts of things to external packages: step-77 does that.)

        +

        Compute a step length $\alpha^n$. In this program, we always set $\alpha^n=0.1$. To make things easier to extend later on, this happens in a function of its own, namely in MinimalSurfaceProblem::determine_step_length. (The strategy of always choosing $\alpha^n=0.1$ is of course not optimal – we should choose a step length that works for a given search direction – but it requires a bit of work to do that. In the end, we leave these sorts of things to external packages: step-77 does that.)

        /usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 2024-03-17 21:57:44.011237807 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 2024-03-17 21:57:44.011237807 +0000 @@ -141,7 +141,7 @@
        -

        The fine level in this mesh consists only of the degrees of freedom that are defined on the refined cells, but does not extend to that part of the domain that is not refined. While this guarantees that the overall effort grows as ${\cal O}(N)$ as necessary for optimal multigrid complexity, it leads to problems when defining where to smooth and what boundary conditions to pose for the operators defined on individual levels if the level boundary is not an external boundary. These questions are discussed in detail in the article cited above.

        +

        The fine level in this mesh consists only of the degrees of freedom that are defined on the refined cells, but does not extend to that part of the domain that is not refined. While this guarantees that the overall effort grows as ${\cal O}(N)$ as necessary for optimal multigrid complexity, it leads to problems when defining where to smooth and what boundary conditions to pose for the operators defined on individual levels if the level boundary is not an external boundary. These questions are discussed in detail in the article cited above.

        The testcase

        The problem we solve here is similar to step-6, with two main differences: first, the multigrid preconditioner, obviously. We also change the discontinuity of the coefficients such that the local assembler does not look more complicated than necessary.

        The commented program

        /usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html 2024-03-17 21:57:44.111238425 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html 2024-03-17 21:57:44.107238400 +0000 @@ -154,23 +154,23 @@

        Quasistatic elastic deformation

        Motivation of the model

        In general, time-dependent small elastic deformations are described by the elastic wave equation

        -\[
+<picture><source srcset=\[
   \rho \frac{\partial^2 \mathbf{u}}{\partial t^2}
   + c \frac{\partial \mathbf{u}}{\partial t}
   - \textrm{div}\  ( C \varepsilon(\mathbf{u})) = \mathbf{f}
   \qquad
   \textrm{in}\ \Omega,
-\] +\]" src="form_2939.png"/>

        -

        where $\mathbf{u}=\mathbf{u} (\mathbf{x},t)$ is the deformation of the body, $\rho$ and $c$ the density and attenuation coefficient, and $\mathbf{f}$ external forces. In addition, initial conditions

        -\[
+<p> where <picture><source srcset=$\mathbf{u}=\mathbf{u} (\mathbf{x},t)$ is the deformation of the body, $\rho$ and $c$ the density and attenuation coefficient, and $\mathbf{f}$ external forces. In addition, initial conditions

        +\[
   \mathbf{u}(\cdot, 0) = \mathbf{u}_0(\cdot)
   \qquad
   \textrm{on}\ \Omega,
-\] +\]" src="form_2942.png"/>

        and Dirichlet (displacement) or Neumann (traction) boundary conditions need to be specified for a unique solution:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \mathbf{u}(\mathbf{x},t) &=& \mathbf{d}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_D\subset\partial\Omega,
@@ -178,12 +178,12 @@
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N=\partial\Omega\backslash\Gamma_D.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2943.png"/>

        -

        In above formulation, $\varepsilon(\mathbf{u})= \frac 12 (\nabla \mathbf{u} + \nabla
-\mathbf{u}^T)$ is the symmetric gradient of the displacement, also called the strain. $C$ is a tensor of rank 4, called the stress-strain tensor (the inverse of the compliance tensor) that contains knowledge of the elastic strength of the material; its symmetry properties make sure that it maps symmetric tensors of rank 2 (“matrices” of dimension $d$, where $d$ is the spatial dimensionality) onto symmetric tensors of the same rank. We will comment on the roles of the strain and stress tensors more below. For the moment it suffices to say that we interpret the term $\textrm{div}\  ( C \varepsilon(\mathbf{u}))$ as the vector with components $\frac \partial{\partial x_j} C_{ijkl} \varepsilon(\mathbf{u})_{kl}$, where summation over indices $j,k,l$ is implied.

        -

        The quasistatic limit of this equation is motivated as follows: each small perturbation of the body, for example by changes in boundary condition or the forcing function, will result in a corresponding change in the configuration of the body. In general, this will be in the form of waves radiating away from the location of the disturbance. Due to the presence of the damping term, these waves will be attenuated on a time scale of, say, $\tau$. Now, assume that all changes in external forcing happen on times scales that are much larger than $\tau$. In that case, the dynamic nature of the change is unimportant: we can consider the body to always be in static equilibrium, i.e. we can assume that at all times the body satisfies

        -\begin{eqnarray*}
+<p> In above formulation, <picture><source srcset=$\varepsilon(\mathbf{u})= \frac 12 (\nabla \mathbf{u} + \nabla
+\mathbf{u}^T)$ is the symmetric gradient of the displacement, also called the strain. $C$ is a tensor of rank 4, called the stress-strain tensor (the inverse of the compliance tensor) that contains knowledge of the elastic strength of the material; its symmetry properties make sure that it maps symmetric tensors of rank 2 (“matrices” of dimension $d$, where $d$ is the spatial dimensionality) onto symmetric tensors of the same rank. We will comment on the roles of the strain and stress tensors more below. For the moment it suffices to say that we interpret the term $\textrm{div}\  ( C \varepsilon(\mathbf{u}))$ as the vector with components $\frac \partial{\partial x_j} C_{ijkl} \varepsilon(\mathbf{u})_{kl}$, where summation over indices $j,k,l$ is implied.

        +

        The quasistatic limit of this equation is motivated as follows: each small perturbation of the body, for example by changes in boundary condition or the forcing function, will result in a corresponding change in the configuration of the body. In general, this will be in the form of waves radiating away from the location of the disturbance. Due to the presence of the damping term, these waves will be attenuated on a time scale of, say, $\tau$. Now, assume that all changes in external forcing happen on times scales that are much larger than $\tau$. In that case, the dynamic nature of the change is unimportant: we can consider the body to always be in static equilibrium, i.e. we can assume that at all times the body satisfies

        +\begin{eqnarray*}
   - \textrm{div}\  ( C \varepsilon(\mathbf{u})) &=& \mathbf{f}(\mathbf{x},t)
   \qquad
   \textrm{in}\ \Omega,
@@ -195,13 +195,13 @@
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2949.png"/>

        -

        Note that the differential equation does not contain any time derivatives any more – all time dependence is introduced through boundary conditions and a possibly time-varying force function $\mathbf{f}(\mathbf{x},t)$. The changes in configuration can therefore be considered as being stationary instantaneously. An alternative view of this is that $t$ is not really a time variable, but only a time-like parameter that governs the evolution of the problem.

        +

        Note that the differential equation does not contain any time derivatives any more – all time dependence is introduced through boundary conditions and a possibly time-varying force function $\mathbf{f}(\mathbf{x},t)$. The changes in configuration can therefore be considered as being stationary instantaneously. An alternative view of this is that $t$ is not really a time variable, but only a time-like parameter that governs the evolution of the problem.

        While these equations are sufficient to describe small deformations, computing large deformations is a little more complicated and, in general, leads to nonlinear equations such as those treated in step-44. In the following, let us consider some of the tools one would employ when simulating problems in which the deformation becomes large.

        Note
        The model we will consider below is not founded on anything that would be mathematically sound: we will consider a model in which we produce a small deformation, deform the physical coordinates of the body by this deformation, and then consider the next loading step again as a linear problem. This isn't consistent, since the assumption of linearity implies that deformations are infinitesimal and so moving around the vertices of our mesh by a finite amount before solving the next linear problem is an inconsistent approach. We should therefore note that it is not surprising that the equations discussed below can't be found in the literature: The model considered here has little to do with reality! On the other hand, the implementation techniques we consider are very much what one would need to use when implementing a real model, as we will see in step-44.
        -

        To come back to defining our "artificial" model, let us first introduce a tensorial stress variable $\sigma$, and write the differential equations in terms of the stress:

        -\begin{eqnarray*}
+<p>To come back to defining our $\sigma$, and write the differential equations in terms of the stress:

        +\begin{eqnarray*}
   - \textrm{div}\  \sigma &=& \mathbf{f}(\mathbf{x},t)
   \qquad
   \textrm{in}\ \Omega(t),
@@ -213,30 +213,30 @@
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N=\partial\Omega(t)\backslash\Gamma_D.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2951.png"/>

        -

        Note that these equations are posed on a domain $\Omega(t)$ that changes with time, with the boundary moving according to the displacements $\mathbf{u}(\mathbf{x},t)$ of the points on the boundary. To complete this system, we have to specify the incremental relationship between the stress and the strain, as follows:

        -\[
+<p> Note that these equations are posed on a domain <picture><source srcset=$\Omega(t)$ that changes with time, with the boundary moving according to the displacements $\mathbf{u}(\mathbf{x},t)$ of the points on the boundary. To complete this system, we have to specify the incremental relationship between the stress and the strain, as follows:

        +\[
   \dot\sigma = C \varepsilon (\dot{\mathbf{u}}),
   \qquad
   \qquad
   \textrm{[stress-strain]}
-\] +\]" src="form_2954.png"/>

        -

        where a dot indicates a time derivative. Both the stress $\sigma$ and the strain $\varepsilon(\mathbf{u})$ are symmetric tensors of rank 2.

        +

        where a dot indicates a time derivative. Both the stress $\sigma$ and the strain $\varepsilon(\mathbf{u})$ are symmetric tensors of rank 2.

        Time discretization

        -

        Numerically, this system is solved as follows: first, we discretize the time component using a backward Euler scheme. This leads to a discrete equilibrium of force at time step $n$:

        -\[
+<p>Numerically, this system is solved as follows: first, we discretize the time component using a backward Euler scheme. This leads to a discrete equilibrium of force at time step <picture><source srcset=$n$:

        +\[
   -\textrm{div}\  \sigma^n = f^n,
-\] +\]" src="form_2956.png"/>

        where

        -\[
+<picture><source srcset=\[
   \sigma^n = \sigma^{n-1} + C \varepsilon (\Delta \mathbf{u}^n),
-\] +\]" src="form_2957.png"/>

        -

        and $\Delta \mathbf{u}^n$ the incremental displacement for time step $n$. In addition, we have to specify initial data $\mathbf{u}(\cdot,0)=\mathbf{u}_0$. This way, if we want to solve for the displacement increment, we have to solve the following system:

        -\begin{align*}
+<p> and <picture><source srcset=$\Delta \mathbf{u}^n$ the incremental displacement for time step $n$. In addition, we have to specify initial data $\mathbf{u}(\cdot,0)=\mathbf{u}_0$. This way, if we want to solve for the displacement increment, we have to solve the following system:

        +\begin{align*}
   - \textrm{div}\   C \varepsilon(\Delta\mathbf{u}^n) &= \mathbf{f} + \textrm{div}\  \sigma^{n-1}
   \qquad
   &&\textrm{in}\ \Omega(t_{n-1}),
@@ -248,11 +248,11 @@
   \mathbf{n} \ C \varepsilon(\Delta \mathbf{u}^n(\mathbf{x},t)) &= \mathbf{b}(\mathbf{x},t_n)-\mathbf{b}(\mathbf{x},t_{n-1})
   \qquad
   &&\textrm{on}\ \Gamma_N=\partial\Omega(t_{n-1})\backslash\Gamma_D.
-\end{align*} +\end{align*}" src="form_2960.png"/>

        -

        The weak form of this set of equations, which as usual is the basis for the finite element formulation, reads as follows: find $\Delta \mathbf{u}^n \in
-\{v\in H^1(\Omega(t_{n-1}))^d: v|_{\Gamma_D}=\mathbf{d}(\cdot,t_n) - \mathbf{d}(\cdot,t_{n-1})\}$ such that

        -\begin{align*}
+<p> The weak form of this set of equations, which as usual is the basis for the finite element formulation, reads as follows: find <picture><source srcset=$\Delta \mathbf{u}^n \in
+\{v\in H^1(\Omega(t_{n-1}))^d: v|_{\Gamma_D}=\mathbf{d}(\cdot,t_n) - \mathbf{d}(\cdot,t_{n-1})\}$ such that

        +\begin{align*}
   (C \varepsilon(\Delta\mathbf{u}^n), \varepsilon(\varphi) )_{\Omega(t_{n-1})}
   &=
   (\mathbf{f}, \varphi)_{\Omega(t_{n-1})}
@@ -264,12 +264,12 @@
   \\
   &\qquad\qquad
   \forall \varphi \in \{\mathbf{v}\in H^1(\Omega(t_{n-1}))^d: \mathbf{v}|_{\Gamma_D}=0\}.
-\end{align*} +\end{align*}" src="form_2962.png"/>

        -

        Using that $\sigma^{n-1} \mathbf{n}
+<p> Using that <picture><source srcset=$\sigma^{n-1} \mathbf{n}
             = [C \varepsilon(\mathbf{u}^{n-1})] \mathbf{n}
-            = \mathbf{b}(\mathbf x, t_{n-1})$, these equations can be simplified to

        -\begin{align*}
+            = \mathbf{b}(\mathbf x, t_{n-1})$, these equations can be simplified to

        +\begin{align*}
   (C \varepsilon(\Delta\mathbf{u}^n), \varepsilon(\varphi) )_{\Omega(t_{n-1})}
   &=
   (\mathbf{f}, \varphi)_{\Omega(t_{n-1})}
@@ -281,32 +281,32 @@
   \qquad
   \qquad
   \textrm{[linear-system]}
-\end{align*} +\end{align*}" src="form_2964.png"/>

        -

        We note that, for simplicity, in the program we will always assume that there are no boundary forces, i.e. $\mathbf{b} = 0$, and that the deformation of the body is driven by body forces $\mathbf{f}$ and prescribed boundary displacements $\mathbf{d}$ alone. It is also worth noting that when integrating by parts, we would get terms of the form $(C \varepsilon(\Delta\mathbf{u}^n), \nabla \varphi
-)_{\Omega(t_{n-1})}$, but that we replace them with the term involving the symmetric gradient $\varepsilon(\varphi)$ instead of $\nabla\varphi$. Due to the symmetry of $C$, the two terms are mathematically equivalent, but the symmetric version avoids the potential for round-off errors making the resulting matrix slightly non-symmetric.

        -

        The system at time step $n$, to be solved on the old domain $\Omega(t_{n-1})$, has exactly the form of a stationary elastic problem, and is therefore similar to what we have already implemented in previous example programs. We will therefore not comment on the space discretization beyond saying that we again use lowest order continuous finite elements.

        +

        We note that, for simplicity, in the program we will always assume that there are no boundary forces, i.e. $\mathbf{b} = 0$, and that the deformation of the body is driven by body forces $\mathbf{f}$ and prescribed boundary displacements $\mathbf{d}$ alone. It is also worth noting that when integrating by parts, we would get terms of the form $(C \varepsilon(\Delta\mathbf{u}^n), \nabla \varphi
+)_{\Omega(t_{n-1})}$, but that we replace them with the term involving the symmetric gradient $\varepsilon(\varphi)$ instead of $\nabla\varphi$. Due to the symmetry of $C$, the two terms are mathematically equivalent, but the symmetric version avoids the potential for round-off errors making the resulting matrix slightly non-symmetric.

        +

        The system at time step $n$, to be solved on the old domain $\Omega(t_{n-1})$, has exactly the form of a stationary elastic problem, and is therefore similar to what we have already implemented in previous example programs. We will therefore not comment on the space discretization beyond saying that we again use lowest order continuous finite elements.

        There are differences, however:

        1. We have to move (update) the mesh after each time step, in order to be able to solve the next time step on a new domain;

        2. -We need to know $\sigma^{n-1}$ to compute the next incremental displacement, i.e. we need to compute it at the end of the time step to make sure it is available for the next time step. Essentially, the stress variable is our window to the history of deformation of the body.
        3. +We need to know $\sigma^{n-1}$ to compute the next incremental displacement, i.e. we need to compute it at the end of the time step to make sure it is available for the next time step. Essentially, the stress variable is our window to the history of deformation of the body.

        These two operations are done in the functions move_mesh and update_quadrature_point_history in the program. While moving the mesh is only a technicality, updating the stress is a little more complicated and will be discussed in the next section.

        Updating the stress variable

        -

        As indicated above, we need to have the stress variable $\sigma^n$ available when computing time step $n+1$, and we can compute it using

        -\[
+<p>As indicated above, we need to have the stress variable <picture><source srcset=$\sigma^n$ available when computing time step $n+1$, and we can compute it using

        +\[
   \sigma^n = \sigma^{n-1} + C \varepsilon (\Delta \mathbf{u}^n).
   \qquad
   \qquad
   \textrm{[stress-update]}
-\] +\]" src="form_2973.png"/>

        -

        There are, despite the apparent simplicity of this equation, two questions that we need to discuss. The first concerns the way we store $\sigma^n$: even if we compute the incremental updates $\Delta\mathbf{u}^n$ using lowest-order finite elements, then its symmetric gradient $\varepsilon(\Delta\mathbf{u}^n)$ is in general still a function that is not easy to describe. In particular, it is not a piecewise constant function, and on general meshes (with cells that are not rectangles parallel to the coordinate axes) or with non-constant stress-strain tensors $C$ it is not even a bi- or trilinear function. Thus, it is a priori not clear how to store $\sigma^n$ in a computer program.

        -

        To decide this, we have to see where it is used. The only place where we require the stress is in the term $(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}$. In practice, we of course replace this term by numerical quadrature:

        -\[
+<p> There are, despite the apparent simplicity of this equation, two questions that we need to discuss. The first concerns the way we store <picture><source srcset=$\sigma^n$: even if we compute the incremental updates $\Delta\mathbf{u}^n$ using lowest-order finite elements, then its symmetric gradient $\varepsilon(\Delta\mathbf{u}^n)$ is in general still a function that is not easy to describe. In particular, it is not a piecewise constant function, and on general meshes (with cells that are not rectangles parallel to the coordinate axes) or with non-constant stress-strain tensors $C$ it is not even a bi- or trilinear function. Thus, it is a priori not clear how to store $\sigma^n$ in a computer program.

        +

        To decide this, we have to see where it is used. The only place where we require the stress is in the term $(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}$. In practice, we of course replace this term by numerical quadrature:

        +\[
   (\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}
   =
   \sum_{K\subset {T}}
@@ -315,12 +315,12 @@
   \sum_{K\subset {T}}
   \sum_q
   w_q \ \sigma^{n-1}(\mathbf{x}_q) : \varepsilon(\varphi(\mathbf{x}_q),
-\] +\]" src="form_2977.png"/>

        -

        where $w_q$ are the quadrature weights and $\mathbf{x}_q$ the quadrature points on cell $K$. This should make clear that what we really need is not the stress $\sigma^{n-1}$ in itself, but only the values of the stress in the quadrature points on all cells. This, however, is a simpler task: we only have to provide a data structure that is able to hold one symmetric tensor of rank 2 for each quadrature point on all cells (or, since we compute in parallel, all quadrature points of all cells that the present MPI process “owns”). At the end of each time step we then only have to evaluate $\varepsilon(\Delta \mathbf{u}^n(\mathbf{x}_q))$, multiply it by the stress-strain tensor $C$, and use the result to update the stress $\sigma^n(\mathbf{x}_q)$ at quadrature point $q$.

        -

        The second complication is not visible in our notation as chosen above. It is due to the fact that we compute $\Delta u^n$ on the domain $\Omega(t_{n-1})$, and then use this displacement increment to both update the stress as well as move the mesh nodes around to get to $\Omega(t_n)$ on which the next increment is computed. What we have to make sure, in this context, is that moving the mesh does not only involve moving around the nodes, but also making corresponding changes to the stress variable: the updated stress is a variable that is defined with respect to the coordinate system of the material in the old domain, and has to be transferred to the new domain. The reason for this can be understood as follows: locally, the incremental deformation $\Delta\mathbf{u}$ can be decomposed into three parts, a linear translation (the constant part of the displacement increment field in the neighborhood of a point), a dilational component (that part of the gradient of the displacement field that has a nonzero divergence), and a rotation. A linear translation of the material does not affect the stresses that are frozen into it – the stress values are simply translated along. The dilational or compressional change produces a corresponding stress update. However, the rotational component does not necessarily induce a nonzero stress update (think, in 2d, for example of the situation where $\Delta\mathbf{u}=(y, -x)^T$, with which $\varepsilon(\Delta
-\mathbf{u})=0$). Nevertheless, if the material was prestressed in a certain direction, then this direction will be rotated along with the material. To this end, we have to define a rotation matrix $R(\Delta \mathbf{u}^n)$ that describes, in each point the rotation due to the displacement increments. It is not hard to see that the actual dependence of $R$ on $\Delta \mathbf{u}^n$ can only be through the curl of the displacement, rather than the displacement itself or its full gradient (as mentioned above, the constant components of the increment describe translations, its divergence the dilational modes, and the curl the rotational modes). Since the exact form of $R$ is cumbersome, we only state it in the program code, and note that the correct updating formula for the stress variable is then

        -\[
+<p> where <picture><source srcset=$w_q$ are the quadrature weights and $\mathbf{x}_q$ the quadrature points on cell $K$. This should make clear that what we really need is not the stress $\sigma^{n-1}$ in itself, but only the values of the stress in the quadrature points on all cells. This, however, is a simpler task: we only have to provide a data structure that is able to hold one symmetric tensor of rank 2 for each quadrature point on all cells (or, since we compute in parallel, all quadrature points of all cells that the present MPI process “owns”). At the end of each time step we then only have to evaluate $\varepsilon(\Delta \mathbf{u}^n(\mathbf{x}_q))$, multiply it by the stress-strain tensor $C$, and use the result to update the stress $\sigma^n(\mathbf{x}_q)$ at quadrature point $q$.

        /usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 2024-03-17 21:57:44.195238943 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 2024-03-17 21:57:44.199238968 +0000 @@ -148,135 +148,135 @@

        The finite element method in general, and deal.II in particular, were invented to solve partial differential equations – in other words, to solve continuum mechanics problems. On the other hand, sometimes one wants to solve problems in which it is useful to track individual objects ("particles") and how their positions evolve. If this simply leads to a set of ordinary differential equations, for example if you want to track the positions of the planets in the solar system over time, then deal.II is clearly not the right tool. On the other hand, if this evolution is due to the interaction with the solution of partial differential equations, or if having a mesh to determine which particles interact with others (such as in the smoothed particle hydrodynamics (SPH) method), then deal.II has support for you.

        The case we will consider here is how electrically charged particles move through an electric field. As motivation, we will consider cathode rays: Electrons emitted by a heated piece of metal that is negatively charged (the "cathode"), and that are then accelerated by an electric field towards the positively charged electrode (the "anode"). The anode is typically ring-shaped so that the majority of electrons can fly through the hole in the form of an electron beam. In the olden times, they might then have illuminated the screen of a TV built from a cathode ray tube. Today, instead, electron beams are useful in X-ray machines, electron beam lithography, electron beam welding, and a number of other areas.

        The equations we will then consider are as follows: First, we need to describe the electric field. This is most easily accomplished by noting that the electric potential $V$ satisfied the equation

        -\[
+<picture><source srcset=\[
   -\epsilon_0 \Delta V = \rho
-\] +\]" src="form_3008.png"/>

        -

        where $\epsilon_0$ is the dielectric constant of vacuum, and $\rho$ is the charge density. This is augmented by boundary conditions that we will choose as follows:

        -\begin{align*}
+<p> where <picture><source srcset=$\epsilon_0$ is the dielectric constant of vacuum, and $\rho$ is the charge density. This is augmented by boundary conditions that we will choose as follows:

        +\begin{align*}
   V &= -V_0 && \text{on}\; \Gamma_\text{cathode}\subset\partial\Omega \\
   V &= +V_0 && \text{on}\; \Gamma_\text{anode}\subset\partial\Omega \\
   \epsilon\frac{\partial V}{\partial n} &= 0
    && \text{on}\; \partial\Omega\setminus\Gamma_\text{cathode}\setminus\Gamma_\text{anode}.
-\end{align*} +\end{align*}" src="form_3010.png"/>

        -

        In other words, we prescribe voltages $+V_0$ and $-V_0$ at the two electrodes and insulating (Neumann) boundary conditions elsewhere. Since the dynamics of the particles are purely due to the electric field $\mathbf E=\nabla V$, we could as well have prescribed $2V_0$ and $0$ at the two electrodes – all that matters is the voltage difference at the two electrodes.

        -

        Given this electric potential $V$ and the electric field $\mathbf E=\nabla V$, we can describe the trajectory of the $i$th particle using the differential equation

        -\[
+<p> In other words, we prescribe voltages <picture><source srcset=$+V_0$ and $-V_0$ at the two electrodes and insulating (Neumann) boundary conditions elsewhere. Since the dynamics of the particles are purely due to the electric field $\mathbf E=\nabla V$, we could as well have prescribed $2V_0$ and $0$ at the two electrodes – all that matters is the voltage difference at the two electrodes.

        +

        Given this electric potential $V$ and the electric field $\mathbf E=\nabla V$, we can describe the trajectory of the $i$th particle using the differential equation

        +\[
   m {\ddot {\mathbf x}}_i = e\mathbf E,
-\] +\]" src="form_3015.png"/>

        -

        where $m,e$ are the mass and electric charge of each particle. In practice, it is convenient to write this as a system of first-order differential equations in the position $\mathbf x$ and velocity $\mathbf v$:

        -\begin{align*}
+<p> where <picture><source srcset=$m,e$ are the mass and electric charge of each particle. In practice, it is convenient to write this as a system of first-order differential equations in the position $\mathbf x$ and velocity $\mathbf v$:

        +\begin{align*}
   {\dot {\mathbf v}}_i &= \frac{e\mathbf E}{m}, \\
   {\dot {\mathbf x}}_i &= {\mathbf v}_i.
-\end{align*} +\end{align*}" src="form_3017.png"/>

        -

        The deal.II class we will use to deal with particles, Particles::ParticleHandler, stores particles in a way so that the position $\mathbf x_i$ is part of the Particles::ParticleHandler data structures. (It stores particles sorted by cell they are in, and consequently needs to know where each particle is.) The velocity $\mathbf v_i$, on the other hand, is of no concern to Particles::ParticleHandler and consequently we will store it as a "property" of each particle that we will update in each time step. Properties can also be used to store any other quantity we might care about each particle: its charge, or if they were larger than just an electron, its color, mass, attitude in space, chemical composition, etc.

        -

        There remain two things to discuss to complete the model: Where particles start and what the charge density $\rho$ is.

        -

        First, historically, cathode rays used very large electric fields to pull electrons out of the metal. This produces only a relatively small current. One can do better by heating the cathode: a statistical fraction of electrons in that case have enough thermal energy to leave the metal; the electric field then just has to be strong enough to pull them away from the attraction of their host body. We will model this in the following way: We will create a new particle if (i) the electric field points away from the electrode, i.e., if $\mathbf E \cdot \mathbf n < 0$ where $\mathbf n$ is the normal vector at a face pointing out of the domain (into the electrode), and (ii) the electric field exceeds a threshold value $|\mathbf E|\ge E_\text{threshold}$. This is surely not a sufficiently accurate model for what really happens, but is good enough for our current tutorial program.

        +

        The deal.II class we will use to deal with particles, Particles::ParticleHandler, stores particles in a way so that the position $\mathbf x_i$ is part of the Particles::ParticleHandler data structures. (It stores particles sorted by cell they are in, and consequently needs to know where each particle is.) The velocity $\mathbf v_i$, on the other hand, is of no concern to Particles::ParticleHandler and consequently we will store it as a "property" of each particle that we will update in each time step. Properties can also be used to store any other quantity we might care about each particle: its charge, or if they were larger than just an electron, its color, mass, attitude in space, chemical composition, etc.

        +

        There remain two things to discuss to complete the model: Where particles start and what the charge density $\rho$ is.

        +

        First, historically, cathode rays used very large electric fields to pull electrons out of the metal. This produces only a relatively small current. One can do better by heating the cathode: a statistical fraction of electrons in that case have enough thermal energy to leave the metal; the electric field then just has to be strong enough to pull them away from the attraction of their host body. We will model this in the following way: We will create a new particle if (i) the electric field points away from the electrode, i.e., if $\mathbf E \cdot \mathbf n < 0$ where $\mathbf n$ is the normal vector at a face pointing out of the domain (into the electrode), and (ii) the electric field exceeds a threshold value $|\mathbf E|\ge E_\text{threshold}$. This is surely not a sufficiently accurate model for what really happens, but is good enough for our current tutorial program.

        Second, in principle we would have to model the charge density via

        -\[
+<picture><source srcset=\[
   \rho(\mathbf x) = \sum_i e\delta(\mathbf x-\mathbf x_i).
-\] +\]" src="form_3022.png"/>

        -

        The issue now is that in reality, a cathode ray tube in an old television yields a current of somewhere around a few milli-Amperes. In the much higher energy beams of particle accelerators, the current may only be a few nano-Ampere. But an Ampere is $6\times 10^{18}$ electrons flowing per second. Now, as you will see in the results section, we really only simulate a few microseconds ( $10^{-6}$ seconds), but that still results in very very large numbers of electrons – far more than we can hope to simulate with a program as small as the current one. As a consequence, let us presume that each particle represents $N$ electrons. Then the particle mass and charge are also $Nm$ and $Ne$ and the equations we have to solve are

        -\[
+<p> The issue now is that in reality, a cathode ray tube in an old television yields a current of somewhere around a few milli-Amperes. In the much higher energy beams of particle accelerators, the current may only be a few nano-Ampere. But an Ampere is <picture><source srcset=$6\times 10^{18}$ electrons flowing per second. Now, as you will see in the results section, we really only simulate a few microseconds ( $10^{-6}$ seconds), but that still results in very very large numbers of electrons – far more than we can hope to simulate with a program as small as the current one. As a consequence, let us presume that each particle represents $N$ electrons. Then the particle mass and charge are also $Nm$ and $Ne$ and the equations we have to solve are

        +\[
   (Nm) {\ddot {\mathbf x}}_i = (Ne)\mathbf E,
-\] +\]" src="form_3026.png"/>

        -

        which is of course exactly the same as above after dividing both sides by $N$. On the other hand, the charge density for these "clumps" of electrons is given by

        -\[
+<p> which is of course exactly the same as above after dividing both sides by <picture><source srcset=$N$. On the other hand, the charge density for these "clumps" of electrons is given by

        +\[
   \rho(\mathbf x) = \sum_i (Ne)\delta(\mathbf x-\mathbf x_i).
-\] +\]" src="form_3027.png"/>

        -

        It is this form that we will implement in the program, where $N$ is chosen rather large in the program to ensure that the particles actually affect the electric field. (This may not be realistic in practice: In most cases, there are just not enough electrons to actually affect the overall electric field. But realism is not our goal here.)

        -

        As a final thought about the model, one may wonder why the equation for the electric field (or, rather, the electric potential) has no time derivative whereas the equations for the electron positions do. In essence, this is a modeling assumption: We assume that the particles move so slowly that at any given time the electric field is in equilibrium. This is saying, in other words, that the velocity of the electrons is much less than the speed of light. In yet other words, we can rephrase this in terms of the electrode voltage $V_0$: Since every volt of electric potential accelerates electrons by approximately 600 km/s (neglecting relativistic effects), requiring $|\mathbf v_i\|\ll c$ is equivalent to saying that $2V_0 \ll 500 \text{V}$. Under this assumption (and the assumption that the total number of electrons is small), one can also neglect the creation of magnetic fields by the moving charges, which would otherwise also affect the movement of the electrons.

        +

        It is this form that we will implement in the program, where $N$ is chosen rather large in the program to ensure that the particles actually affect the electric field. (This may not be realistic in practice: In most cases, there are just not enough electrons to actually affect the overall electric field. But realism is not our goal here.)

        +

        As a final thought about the model, one may wonder why the equation for the electric field (or, rather, the electric potential) has no time derivative whereas the equations for the electron positions do. In essence, this is a modeling assumption: We assume that the particles move so slowly that at any given time the electric field is in equilibrium. This is saying, in other words, that the velocity of the electrons is much less than the speed of light. In yet other words, we can rephrase this in terms of the electrode voltage $V_0$: Since every volt of electric potential accelerates electrons by approximately 600 km/s (neglecting relativistic effects), requiring $|\mathbf v_i\|\ll c$ is equivalent to saying that $2V_0 \ll 500 \text{V}$. Under this assumption (and the assumption that the total number of electrons is small), one can also neglect the creation of magnetic fields by the moving charges, which would otherwise also affect the movement of the electrons.

        Time discretization

        The equations outlined above then form a set of coupled differential equations. Let us bring them all together in one place again to make that clear:

        -\begin{align*}
+<picture><source srcset=\begin{align*}
   -\epsilon_0 \Delta V &= \sum_i e\delta(\mathbf x-\mathbf x_i)
   \\
   {\dot {\mathbf x}}_i &= {\mathbf v}_i,
   \\
   {\dot {\mathbf v}}_i &= \frac{e\mathbf E}{m} = \frac{e\mathbf \nabla V}{m}.
-\end{align*} +\end{align*}" src="form_3031.png"/>

        Because of the awkward dependence of the electric potential on the particle locations, we don't want to solve this as a coupled system but instead use a decoupled approach where we first solve for the potential in each time step and then the particle locations. (One could also do it the other way around, of course.) This is very much in the same spirit as we do in step-21, step-31, and step-32, to name just a few, and can all be understood in the context of the operator splitting methods discussed in step-58.

        -

        So, if we denote by an upper index $n$ the time step, and if we use a simple time discretization for the ODE, then this means that we have to solve the following set of equations in each time step:

        -\begin{align*}
+<p>So, if we denote by an upper index <picture><source srcset=$n$ the time step, and if we use a simple time discretization for the ODE, then this means that we have to solve the following set of equations in each time step:

        +\begin{align*}
   -\epsilon_0 \Delta V^{(n)} &= \sum_i e\delta(\mathbf x-\mathbf x_i^{(n-1)})
   \\
   \frac{{\mathbf v}_i^{(n)}-{\mathbf v}_i^{(n-1)}}{\Delta t} &= \frac{e\nabla V^{(n)}}{m}
   \\
   \frac{{\mathbf x}_i^{(n)}-{\mathbf x}_i^{(n-1)}}{\Delta t} &= {\mathbf v}_i^{(n)}.
-\end{align*} +\end{align*}" src="form_3032.png"/>

        -

        This scheme can be understood in the framework of operator splitting methods (specifically, the "Lie splitting" method) wherein a coupled system is solved by updating one variable at a time, using either the old values of other variables (e.g., using $\mathbf x_i^{(n-1)}$ in the first equation) or the values of variables that have already been updated in a previous sub-step (e.g., using $V^{(n)}$ in the second equation). There are of course many better ways to do a time discretization (for example the simple leapfrog scheme when updating the velocity, or more general Strang splitting methods for the coupled system) but this isn't the point of the tutorial program, and so we will be content with what we have here. (We will comment on a piece of this puzzle in the possibilities for extensions section of this program, however.)

        -

        There remains the question of how we should choose the time step size $\Delta t$. The limitation here is that the Particles::ParticleHandler class needs to keep track of which cell each particle is in. This is particularly an issue if we are running computations in parallel (say, in step-70) because in that case every process only stores those cells it owns plus one layer of "ghost cells". That's not relevant here, but in general we should make sure that over the course of each time step, a particle moves only from one cell to any of its immediate neighbors (face, edge, or vertex neighbors). If we can ensure that, then Particles::ParticleHandler is guaranteed to be able to figure out which cell a particle ends up in. To do this, a useful rule of thumb is that we should choose the time step so that for all particles the expected distance the particle moves by is less than one cell diameter:

        -\[
+<p> This scheme can be understood in the framework of operator splitting methods (specifically, the $\mathbf x_i^{(n-1)}$ in the first equation) or the values of variables that have already been updated in a previous sub-step (e.g., using $V^{(n)}$ in the second equation). There are of course many better ways to do a time discretization (for example the simple leapfrog scheme when updating the velocity, or more general Strang splitting methods for the coupled system) but this isn't the point of the tutorial program, and so we will be content with what we have here. (We will comment on a piece of this puzzle in the possibilities for extensions section of this program, however.)

        +

        There remains the question of how we should choose the time step size $\Delta t$. The limitation here is that the Particles::ParticleHandler class needs to keep track of which cell each particle is in. This is particularly an issue if we are running computations in parallel (say, in step-70) because in that case every process only stores those cells it owns plus one layer of "ghost cells". That's not relevant here, but in general we should make sure that over the course of each time step, a particle moves only from one cell to any of its immediate neighbors (face, edge, or vertex neighbors). If we can ensure that, then Particles::ParticleHandler is guaranteed to be able to figure out which cell a particle ends up in. To do this, a useful rule of thumb is that we should choose the time step so that for all particles the expected distance the particle moves by is less than one cell diameter:

        +\[
   \Delta t \le \frac{h_i}{\|\mathbf v_i\|} \qquad\qquad \forall i,
-\] +\]" src="form_3035.png"/>

        or equivalently

        -\[
+<picture><source srcset=\[
   \Delta t \le \min_i \frac{h_i}{\|\mathbf v_i\|}.
-\] +\]" src="form_3036.png"/>

        -

        Here, $h_i$ is the length of the shortest edge of the cell on which particle $i$ is located – in essence, a measure of the size of a cell.

        +

        Here, $h_i$ is the length of the shortest edge of the cell on which particle $i$ is located – in essence, a measure of the size of a cell.

        On the other hand, a particle might already be at the boundary of one cell and the neighboring cell might be once further refined. So then the time to cross that neighboring cell would actually be half the amount above, suggesting

        -\[
+<picture><source srcset=\[
   \Delta t \le \min_i \frac{\tfrac 12 h_i}{\|\mathbf v_i\|}.
-\] +\]" src="form_3037.png"/>

        But even that is not good enough: The formula above updates the particle positions in each time using the formula

        -\[
+<picture><source srcset=\[
 \frac{{\mathbf x}_i^{(n)}-{\mathbf x}_i^{(n-1)}}{\Delta t} = {\mathbf v}_i^{(n)},
-\] +\]" src="form_3038.png"/>

        -

        that is, using the current velocity ${\mathbf v}_i^{n}$. But we don't have the current velocity yet at the time when we need to choose $\Delta t$ – which is after we have updated the potential $V^{(n)}$ but before we update the velocity from ${\mathbf v}_i^{(n-1)}$ to ${\mathbf v}_i^{(n)}$. All we have is ${\mathbf v}_i^{(n-1)}$. So we need an additional safety factor for our final choice:

        -\[
+<p> that is, using the <em>current</em> velocity <picture><source srcset=${\mathbf v}_i^{n}$. But we don't have the current velocity yet at the time when we need to choose $\Delta t$ – which is after we have updated the potential $V^{(n)}$ but before we update the velocity from ${\mathbf v}_i^{(n-1)}$ to ${\mathbf v}_i^{(n)}$. All we have is ${\mathbf v}_i^{(n-1)}$. So we need an additional safety factor for our final choice:

        +\[
   \Delta t^{(n)} =
   c_\text{safety} \min_i \frac{\tfrac 12 h_i}{\|\mathbf v_i^{(n-1)}\|}.
-\] +\]" src="form_3042.png"/>

        -

        How large should $c_\text{safety}$ be? That depends on how much of underestimate $\|\mathbf v_i^{(n-1)}\|$ might be compared to $\|\mathbf v_i^{(n)}\|$, and that is actually quite easy to assess: A particle created in one time step with zero velocity will roughly pick up equal velocity increments in each successive time step if the electric field it encounters along the way were roughly constant. So the maximal difference between $\|\mathbf v_i^{(n-1)}\|$ and $\|\mathbf v_i^{(n)}\|$ would be a factor of two. As a consequence, we will choose $c_\text{safety}=0.5$.

        +

        How large should $c_\text{safety}$ be? That depends on how much of underestimate $\|\mathbf v_i^{(n-1)}\|$ might be compared to $\|\mathbf v_i^{(n)}\|$, and that is actually quite easy to assess: A particle created in one time step with zero velocity will roughly pick up equal velocity increments in each successive time step if the electric field it encounters along the way were roughly constant. So the maximal difference between $\|\mathbf v_i^{(n-1)}\|$ and $\|\mathbf v_i^{(n)}\|$ would be a factor of two. As a consequence, we will choose $c_\text{safety}=0.5$.

        There is only one other case we ought to consider: What happens in the very first time step? There, any particles to be moved along have just been created, but they have a zero velocity. So we don't know what velocity we should choose for them. Of course, in all other time steps there are also particles that have just been created, but in general, the particles with the highest velocity limit the time step size and so the newly created particles with their zero velocity don't matter. But if we only have such particles?

        -

        In that case, we can use the following approximation: If a particle starts at $\mathbf v^{(0)}=0$, then the update formula tells us that

        -\[
+<p>In that case, we can use the following approximation: If a particle starts at <picture><source srcset=$\mathbf v^{(0)}=0$, then the update formula tells us that

        +\[
   {\mathbf v}_i^{(1)} = \frac{e\nabla V^{(1)}}{m} \Delta t,
-\] +\]" src="form_3048.png"/>

        and consequently

        -\[
+<picture><source srcset=\[
     \frac{{\mathbf x}_i^{(1)}-{\mathbf x}_i^{(0)}}{\Delta t} = {\mathbf v}_i^{(1)},
-\] +\]" src="form_3049.png"/>

        which we can write as

        -\[
+<picture><source srcset=\[
     {\mathbf x}_i^{(1)} - {\mathbf x}_i^{(0)} = \frac{e\nabla V^{(1)}}{m} \Delta t^2.
-\] +\]" src="form_3050.png"/>

        -

        Not wanting to move a particle by more than $\frac 12 h_i$ then implies that we should choose the time step as

        -\[
+<p> Not wanting to move a particle by more than <picture><source srcset=$\frac 12 h_i$ then implies that we should choose the time step as

        +\[
   \Delta t
   \le
   \min_i
   \sqrt{ \frac{h_i m}{e \|\nabla V^{(1)}\| }}.
-\] +\]" src="form_3052.png"/>

        Using the same argument about neighboring cells possibly being smaller by a factor of two then leads to the final formula for time step zero:

        -\[
+<picture><source srcset=\[
   \Delta t
   =
   \min_i
   \sqrt{ \frac{\frac 12 h_i m}{e \|\nabla V^{(1)}\| } }.
-\] +\]" src="form_3053.png"/>

        -

        Strictly speaking, we would have to evaluate the electric potential $V^{(1)}$ at the location of each particle, but a good enough approximation is to use the maximum of the values at the vertices of the respective cell. (Why the vertices and not the midpoint? Because the gradient of the solution of the Laplace equation, i.e., the electric field, is largest in corner singularities which are located at the vertices of cells.) This has the advantage that we can make good use of the FEValues functionality which can recycle pre-computed material as long as the quadrature points are the same from one cell to the next.

        -

        We could always run this kind of scheme to estimate the difference between $\mathbf v_i^{(n-1)}$ and $\mathbf v_i^{(n)}$, but it relies on evaluating the electric field $\mathbf E$ on each cell, and that is expensive. As a consequence, we will limit this approach to the very first time step.

        +

        Strictly speaking, we would have to evaluate the electric potential $V^{(1)}$ at the location of each particle, but a good enough approximation is to use the maximum of the values at the vertices of the respective cell. (Why the vertices and not the midpoint? Because the gradient of the solution of the Laplace equation, i.e., the electric field, is largest in corner singularities which are located at the vertices of cells.) This has the advantage that we can make good use of the FEValues functionality which can recycle pre-computed material as long as the quadrature points are the same from one cell to the next.

        +

        We could always run this kind of scheme to estimate the difference between $\mathbf v_i^{(n-1)}$ and $\mathbf v_i^{(n)}$, but it relies on evaluating the electric field $\mathbf E$ on each cell, and that is expensive. As a consequence, we will limit this approach to the very first time step.

        Spatial discretization

        -

        Having discussed the time discretization, the discussion of the spatial discretization is going to be short: We use quadratic finite elements, i.e., the space $Q_2$, to approximate the electric potential $V$. The mesh is adapted a couple of times during the initial time step. All of this is entirely standard if you have read step-6, and the implementation does not provide for any kind of surprise.

        +

        Having discussed the time discretization, the discussion of the spatial discretization is going to be short: We use quadratic finite elements, i.e., the space $Q_2$, to approximate the electric potential $V$. The mesh is adapted a couple of times during the initial time step. All of this is entirely standard if you have read step-6, and the implementation does not provide for any kind of surprise.

        Dealing with particles programmatically

        Adding and moving particles is, in practice, not very difficult in deal.II. To add one, the create_particles() function of this program simply uses a code snippet of the following form:

        new_particle.set_location(location);
        @@ -289,7 +289,7 @@
        void set_reference_location(const Point< dim > &new_reference_location)
        Definition particle.h:542
        /usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html 2024-03-17 21:57:44.247239264 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html 2024-03-17 21:57:44.243239239 +0000 @@ -119,14 +119,14 @@

      Introduction

      Note
      The material presented here is also discussed in video lecture 9. (All video lectures are also available here.)
      -

      The finite element method is based on approximating the solution $u$ of a differential equation such as $-\Delta u=f$ by a function $u_h$ that is "piecewise" polynomial; that is, we subdivide the domain $\Omega$ on which the equation is posed into small cells that in the documentation we will generally denote by the symbol $K$. On each cell $K$, the approximating function $u_h$ we seek is then a polynomial. (Or, strictly speaking, a function that is the image of a polynomial from a "reference cell", but let us not make things more complicated than necessary for now.)

      -

      In the previous tutorial program (in step-1), we showed how we should think of the subdivision of the domain into cells as a "mesh" represented by the Triangulation class, and what this looks like in code. In the current tutorial program, we now show how one represents piecewise polynomial functions through the concept of degrees of freedom defined on this mesh. For this example, we will use the lowest order ( $Q_1$) finite elements, that is the approximating function $u_h$ we are looking for will be "bi-linear" on each quadrilateral cell $K$ of the mesh. (They would be linear if we worked on triangles.)

      -

      In practice, we represent the function as a linear combination of shape functions $\varphi_j(\mathbf x)$ with multipliers $U_j$ that we call the "degrees of freedom". For the bi-linear functions we consider here, each of these shape functions and degrees of freedom is associated with a vertex of the mesh. Later examples will demonstrate higher order elements where degrees of freedom are not necessarily associated with vertices any more, but can be associated with edges, faces, or cells.

      -

      The term "degree of freedom" is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf x) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf
-x)$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problems is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in
-V_h$). In other words, all we say here is that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf x)$ we have used above in the expansion of $u_h(\mathbf x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh.

      +

      The finite element method is based on approximating the solution $u$ of a differential equation such as $-\Delta u=f$ by a function $u_h$ that is "piecewise" polynomial; that is, we subdivide the domain $\Omega$ on which the equation is posed into small cells that in the documentation we will generally denote by the symbol $K$. On each cell $K$, the approximating function $u_h$ we seek is then a polynomial. (Or, strictly speaking, a function that is the image of a polynomial from a "reference cell", but let us not make things more complicated than necessary for now.)

      +

      In the previous tutorial program (in step-1), we showed how we should think of the subdivision of the domain into cells as a "mesh" represented by the Triangulation class, and what this looks like in code. In the current tutorial program, we now show how one represents piecewise polynomial functions through the concept of degrees of freedom defined on this mesh. For this example, we will use the lowest order ( $Q_1$) finite elements, that is the approximating function $u_h$ we are looking for will be "bi-linear" on each quadrilateral cell $K$ of the mesh. (They would be linear if we worked on triangles.)

      +

      In practice, we represent the function as a linear combination of shape functions $\varphi_j(\mathbf x)$ with multipliers $U_j$ that we call the "degrees of freedom". For the bi-linear functions we consider here, each of these shape functions and degrees of freedom is associated with a vertex of the mesh. Later examples will demonstrate higher order elements where degrees of freedom are not necessarily associated with vertices any more, but can be associated with edges, faces, or cells.

      +

      The term "degree of freedom" is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf x) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf
+x)$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problems is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in
+V_h$). In other words, all we say here is that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf x)$ we have used above in the expansion of $u_h(\mathbf x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh.

      Enumerating degrees of freedom

      -

      Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher order elements, one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. In other words, the enumeration of degrees of freedom is an entirely separate thing from the indices we use for vertices. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler.

      +

      Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher order elements, one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. In other words, the enumeration of degrees of freedom is an entirely separate thing from the indices we use for vertices. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler.

      Defining degrees of freedom ("DoF"s in short) on a mesh is, in practice, a rather simple task, since the library does all the work for you. Essentially, all you have to do is create a finite element object (from one of the many finite element classes deal.II already has, see for example the Finite element space descriptions documentation) and give it to a DoFHandler object through the DoFHandler::distribute_dofs() function ("distributing DoFs" is the term we use to describe the process of enumerating the basis functions as discussed above). The DoFHandler is a class that knows which degrees of freedom live where, i.e., it can answer questions like "how many degrees of freedom are there globally" and "on this cell, give me the global indices of the shape functions that live here". This is the sort of information you need when determining how big your system matrix should be, and when copying the contributions of a single cell into the global matrix.

      The first task of the current program is therefore to take a mesh and a finite element, and enumerate the degrees of freedom. In the current context, this means simply giving each vertex of the mesh a DoF index. Once that has happened, we will output in a picture which vertex ended up with which DoF index. You can find the corresponding pictures in the results section of this tutorial.

      @@ -135,11 +135,11 @@

      The next step would then be to compute a matrix and right hand side corresponding to a particular differential equation using this finite element and mesh. We will keep this step for the step-3 program and rather talk about one practical aspect of a finite element program, namely that finite element matrices are always very sparse: almost all entries in these matrices are zero.

      To be more precise, we say that a matrix is sparse if the number of nonzero entries per row in the matrix is bounded by a number that is independent of the overall number of degrees of freedom. For example, the simple 5-point stencil of a finite difference approximation of the Laplace equation leads to a sparse matrix since the number of nonzero entries per row is five, and therefore independent of the total size of the matrix. For more complicated problems – say, the Stokes problem of step-22 – and in particular in 3d, the number of entries per row may be several hundred. But the important point is that this number is independent of the overall size of the problem: If you refine the mesh, the maximal number of unknowns per row remains the same.

      Sparsity is one of the distinguishing features of the finite element method compared to, say, approximating the solution of a partial differential equation using a Taylor expansion and matching coefficients, or using a Fourier basis.

      -

      In practical terms, it is the sparsity of matrices that enables us to solve problems with millions or billions of unknowns. To understand this, note that a matrix with $N$ rows, each with a fixed upper bound for the number of nonzero entries, requires ${\cal O}(N)$ memory locations for storage, and a matrix-vector multiplication also requires only ${\cal O}(N)$ operations. Consequently, if we had a linear solver that requires only a fixed number of matrix-vector multiplications to come up with the solution of a linear system with this matrix, then we would have a solver that can find the values of all $N$ unknowns with optimal complexity, i.e., with a total of ${\cal O}(N)$ operations. It is clear that this wouldn't be possible if the matrix were not sparse (because then the number of entries in the matrix would have to be ${\cal O}(N^s)$ with some $s>1$, and doing a fixed number of matrix-vector products would take ${\cal O}(N^s)$ operations), but it also requires very specialized solvers such as multigrid methods to satisfy the requirement that the solution requires only a fixed number of matrix-vector multiplications. We will frequently look at the question of what solver to use in the remaining programs of this tutorial.

      -

      The sparsity is generated by the fact that finite element shape functions are defined locally on individual cells, rather than globally, and that the local differential operators in the bilinear form only couple shape functions whose support overlaps. (The "support" of a function is the area where it is nonzero. For the finite element method, the support of a shape function is generally the cells adjacent to the vertex, edge, or face it is defined on.) In other words, degrees of freedom $i$ and $j$ that are not defined on the same cell do not overlap, and consequently the matrix entry $A_{ij}$ will be zero. (In some cases such as the Discontinuous Galerkin method, shape functions may also connect to neighboring cells through face integrals. But finite element methods do not generally couple shape functions beyond the immediate neighbors of a cell on which the function is defined.)

      +

      In practical terms, it is the sparsity of matrices that enables us to solve problems with millions or billions of unknowns. To understand this, note that a matrix with $N$ rows, each with a fixed upper bound for the number of nonzero entries, requires ${\cal O}(N)$ memory locations for storage, and a matrix-vector multiplication also requires only ${\cal O}(N)$ operations. Consequently, if we had a linear solver that requires only a fixed number of matrix-vector multiplications to come up with the solution of a linear system with this matrix, then we would have a solver that can find the values of all $N$ unknowns with optimal complexity, i.e., with a total of ${\cal O}(N)$ operations. It is clear that this wouldn't be possible if the matrix were not sparse (because then the number of entries in the matrix would have to be ${\cal O}(N^s)$ with some $s>1$, and doing a fixed number of matrix-vector products would take ${\cal O}(N^s)$ operations), but it also requires very specialized solvers such as multigrid methods to satisfy the requirement that the solution requires only a fixed number of matrix-vector multiplications. We will frequently look at the question of what solver to use in the remaining programs of this tutorial.

      +

      The sparsity is generated by the fact that finite element shape functions are defined locally on individual cells, rather than globally, and that the local differential operators in the bilinear form only couple shape functions whose support overlaps. (The "support" of a function is the area where it is nonzero. For the finite element method, the support of a shape function is generally the cells adjacent to the vertex, edge, or face it is defined on.) In other words, degrees of freedom $i$ and $j$ that are not defined on the same cell do not overlap, and consequently the matrix entry $A_{ij}$ will be zero. (In some cases such as the Discontinuous Galerkin method, shape functions may also connect to neighboring cells through face integrals. But finite element methods do not generally couple shape functions beyond the immediate neighbors of a cell on which the function is defined.)

      How degrees of freedom are enumerated

      By default, the DoFHandler class enumerates degrees of freedom on a mesh using an algorithm that is difficult to describe and leads to results that do look right if you know what it is doing but otherwise appears rather random; consequently, the sparsity pattern is also not optimized for any particular purpose. To show this, the code below will demonstrate a simple way to output the "sparsity pattern" that corresponds to a DoFHandler, i.e., an object that represents all of the potentially nonzero elements of a matrix one may build when discretizing a partial differential equation on a mesh and its DoFHandler. This lack of structure in the sparsity pattern will be apparent from the pictures we show below.

      -

      For most applications and algorithms, the exact way in which degrees of freedom are numbered does not matter. For example, the Conjugate Gradient method we use to solve linear systems does not care. On the other hand, some algorithms do care: in particular, some preconditioners such as SSOR will work better if they can walk through degrees of freedom in a particular order, and it would be nice if we could just sort them in such a way that SSOR can iterate through them from zero to $N$ in this order. Other examples include computing incomplete LU or Cholesky factorizations, or if we care about the block structure of matrices (see step-20 for an example). deal.II therefore has algorithms that can re-enumerate degrees of freedom in particular ways in namespace DoFRenumbering. Renumbering can be thought of as choosing a different, permuted basis of the finite element space. The sparsity pattern and matrices that result from this renumbering are therefore also simply a permutation of rows and columns compared to the ones we would get without explicit renumbering.

      +

      For most applications and algorithms, the exact way in which degrees of freedom are numbered does not matter. For example, the Conjugate Gradient method we use to solve linear systems does not care. On the other hand, some algorithms do care: in particular, some preconditioners such as SSOR will work better if they can walk through degrees of freedom in a particular order, and it would be nice if we could just sort them in such a way that SSOR can iterate through them from zero to $N$ in this order. Other examples include computing incomplete LU or Cholesky factorizations, or if we care about the block structure of matrices (see step-20 for an example). deal.II therefore has algorithms that can re-enumerate degrees of freedom in particular ways in namespace DoFRenumbering. Renumbering can be thought of as choosing a different, permuted basis of the finite element space. The sparsity pattern and matrices that result from this renumbering are therefore also simply a permutation of rows and columns compared to the ones we would get without explicit renumbering.

      In the program below, we will use the algorithm of Cuthill and McKee to do so. We will show the sparsity pattern for both the original enumeration of degrees of freedom and of the renumbered version below, in the results section.

      The commented program

      The first few includes are just like in the previous program, so do not require additional comments:

      @@ -277,7 +277,7 @@
       

      Renumbering of DoFs

      In the sparsity pattern produced above, the nonzero entries extended quite far off from the diagonal. For some algorithms, for example for incomplete LU decompositions or Gauss-Seidel preconditioners, this is unfavorable, and we will show a simple way how to improve this situation.

      -

      Remember that for an entry $(i,j)$ in the matrix to be nonzero, the supports of the shape functions i and j needed to intersect (otherwise in the integral, the integrand would be zero everywhere since either the one or the other shape function is zero at some point). However, the supports of shape functions intersected only if they were adjacent to each other, so in order to have the nonzero entries clustered around the diagonal (where $i$ equals $j$), we would like adjacent shape functions to be numbered with indices (DoF numbers) that do not differ too much.

      +

      Remember that for an entry $(i,j)$ in the matrix to be nonzero, the supports of the shape functions i and j needed to intersect (otherwise in the integral, the integrand would be zero everywhere since either the one or the other shape function is zero at some point). However, the supports of shape functions intersected only if they were adjacent to each other, so in order to have the nonzero entries clustered around the diagonal (where $i$ equals $j$), we would like adjacent shape functions to be numbered with indices (DoF numbers) that do not differ too much.

      This can be accomplished by a simple front marching algorithm, where one starts at a given vertex and gives it the index zero. Then, its neighbors are numbered successively, making their indices close to the original one. Then, their neighbors, if not yet numbered, are numbered, and so on.

      One algorithm that adds a little bit of sophistication along these lines is the one by Cuthill and McKee. We will use it in the following function to renumber the degrees of freedom such that the resulting sparsity pattern is more localized around the diagonal. The only interesting part of the function is the first call to DoFRenumbering::Cuthill_McKee, the rest is essentially as before:

        void renumber_dofs(DoFHandler<2> &dof_handler)
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html 2024-03-17 21:57:44.323239733 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html 2024-03-17 21:57:44.327239758 +0000 @@ -149,98 +149,98 @@

      Introduction

      Note
      The material presented here is also discussed in video lecture 19, video lecture 20, video lecture 21. (All video lectures are also available here.)

      This program is devoted to two aspects: the use of mixed finite elements – in particular Raviart-Thomas elements – and using block matrices to define solvers, preconditioners, and nested versions of those that use the substructure of the system matrix. The equation we are going to solve is again the Poisson equation, though with a matrix-valued coefficient:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   -\nabla \cdot K({\mathbf x}) \nabla p &=& f \qquad {\textrm{in}\ } \Omega, \\
   p &=& g \qquad {\textrm{on}\ }\partial\Omega.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3080.png"/>

      -

      $K({\mathbf x})$ is assumed to be uniformly positive definite, i.e., there is $\alpha>0$ such that the eigenvalues $\lambda_i({\mathbf x})$ of $K(x)$ satisfy $\lambda_i({\mathbf x})\ge \alpha$. The use of the symbol $p$ instead of the usual $u$ for the solution variable will become clear in the next section.

      +

      $K({\mathbf x})$ is assumed to be uniformly positive definite, i.e., there is $\alpha>0$ such that the eigenvalues $\lambda_i({\mathbf x})$ of $K(x)$ satisfy $\lambda_i({\mathbf x})\ge \alpha$. The use of the symbol $p$ instead of the usual $u$ for the solution variable will become clear in the next section.

      After discussing the equation and the formulation we are going to use to solve it, this introduction will cover the use of block matrices and vectors, the definition of solvers and preconditioners, and finally the actual test case we are going to solve.

      We are going to extend this tutorial program in step-21 to solve not only the mixed Laplace equation, but add another equation that describes the transport of a mixture of two fluids.

      The equations covered here fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems module.

      The equations

      In the form above, the Poisson equation (i.e., the Laplace equation with a nonzero right hand side) is generally considered a good model equation for fluid flow in porous media. Of course, one typically models fluid flow through the Navier-Stokes equations or, if fluid velocities are slow or the viscosity is large, the Stokes equations (which we cover in step-22). In the first of these two models, the forces that act are inertia and viscous friction, whereas in the second it is only viscous friction – i.e., forces that one fluid particle exerts on a nearby one. This is appropriate if you have free flow in a large domain, say a pipe, a river, or in the air. On the other hand, if the fluid is confined in pores, then friction forces exerted by the pore walls on the fluid become more and more important and internal viscous friction becomes less and less important. Modeling this then first leads to the Brinkman model if both effects are important, and in the limit of very small pores to the Darcy equations. The latter is just a different name for the Poisson or Laplace equation, connotating it with the area to which one wants to apply it: slow flow in a porous medium. In essence it says that the velocity is proportional to the negative pressure gradient that drives the fluid through the porous medium.

      -

      The Darcy equation models this pressure that drives the flow. (Because the solution variable is a pressure, we here use the name $p$ instead of the name $u$ more commonly used for the solution of partial differential equations.) Typical applications of this view of the Laplace equation are then modeling groundwater flow, or the flow of hydrocarbons in oil reservoirs. In these applications, $K$ is the permeability tensor, i.e., a measure for how much resistance the soil or rock matrix asserts on the fluid flow.

      +

      The Darcy equation models this pressure that drives the flow. (Because the solution variable is a pressure, we here use the name $p$ instead of the name $u$ more commonly used for the solution of partial differential equations.) Typical applications of this view of the Laplace equation are then modeling groundwater flow, or the flow of hydrocarbons in oil reservoirs. In these applications, $K$ is the permeability tensor, i.e., a measure for how much resistance the soil or rock matrix asserts on the fluid flow.

      In the applications named above, a desirable feature for a numerical scheme is that it should be locally conservative, i.e., that whatever flows into a cell also flows out of it (or the difference is equal to the integral over the source terms over each cell, if the sources are nonzero). However, as it turns out, the usual discretizations of the Laplace equation (such as those used in step-3, step-4, or step-6) do not satisfy this property. But, one can achieve this by choosing a different formulation of the problem and a particular combination of finite element spaces.

      Formulation, weak form, and discrete problem

      -

      To this end, one first introduces a second variable, called the velocity, ${\mathbf u}=-K\nabla p$. By its definition, the velocity is a vector in the negative direction of the pressure gradient, multiplied by the permeability tensor. If the permeability tensor is proportional to the unit matrix, this equation is easy to understand and intuitive: the higher the permeability, the higher the velocity; and the velocity is proportional to the gradient of the pressure, going from areas of high pressure to areas of low pressure (thus the negative sign).

      +

      To this end, one first introduces a second variable, called the velocity, ${\mathbf u}=-K\nabla p$. By its definition, the velocity is a vector in the negative direction of the pressure gradient, multiplied by the permeability tensor. If the permeability tensor is proportional to the unit matrix, this equation is easy to understand and intuitive: the higher the permeability, the higher the velocity; and the velocity is proportional to the gradient of the pressure, going from areas of high pressure to areas of low pressure (thus the negative sign).

      With this second variable, one then finds an alternative version of the Laplace equation, called the mixed formulation:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   K^{-1} {\mathbf u} + \nabla p &=& 0 \qquad {\textrm{in}\ } \Omega, \\
   -{\textrm{div}}\ {\mathbf u} &=& -f \qquad {\textrm{in}\ }\Omega, \\
   p &=& g \qquad {\textrm{on}\ } \partial\Omega.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3087.png"/>

      -

      Here, we have multiplied the equation defining the velocity ${\mathbf
-u}$ by $K^{-1}$ because this makes the set of equations symmetric: one of the equations has the gradient, the second the negative divergence, and these two are of course adjoints of each other, resulting in a symmetric bilinear form and a consequently symmetric system matrix under the common assumption that $K$ is a symmetric tensor.

      +

      Here, we have multiplied the equation defining the velocity ${\mathbf
+u}$ by $K^{-1}$ because this makes the set of equations symmetric: one of the equations has the gradient, the second the negative divergence, and these two are of course adjoints of each other, resulting in a symmetric bilinear form and a consequently symmetric system matrix under the common assumption that $K$ is a symmetric tensor.

      The weak formulation of this problem is found by multiplying the two equations with test functions and integrating some terms by parts:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   A(\{{\mathbf u},p\},\{{\mathbf v},q\}) = F(\{{\mathbf v},q\}),
-\end{eqnarray*} +\end{eqnarray*}" src="form_3090.png"/>

      where

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   A(\{{\mathbf u},p\},\{{\mathbf v},q\})
   &=&
   ({\mathbf v}, K^{-1}{\mathbf u})_\Omega - ({\textrm{div}}\ {\mathbf v}, p)_\Omega
   - (q,{\textrm{div}}\ {\mathbf u})_\Omega
   \\
   F(\{{\mathbf v},q\}) &=& -(g,{\mathbf v}\cdot {\mathbf n})_{\partial\Omega} - (f,q)_\Omega.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3091.png"/>

      -

      Here, ${\mathbf n}$ is the outward normal vector at the boundary. Note how in this formulation, Dirichlet boundary values of the original problem are incorporated in the weak form.

      -

      To be well-posed, we have to look for solutions and test functions in the space $H({\textrm{div}})=\{{\mathbf w}\in L^2(\Omega)^d:\ {\textrm{div}}\ {\mathbf w}\in L^2\}$ for $\mathbf u$, $\mathbf v$, and $L^2$ for $p,q$. It is a well-known fact stated in almost every book on finite element theory that if one chooses discrete finite element spaces for the approximation of ${\mathbf u},p$ inappropriately, then the resulting discrete problem is unstable and the discrete solution will not converge to the exact solution. (Some details on the problem considered here – which falls in the class of "saddle-point problems" – can be found on the Wikipedia page on the Ladyzhenskaya-Babuska-Brezzi (LBB) condition.)

      -

      To overcome this, a number of different finite element pairs for ${\mathbf u},p$ have been developed that lead to a stable discrete problem. One such pair is to use the Raviart-Thomas spaces $RT(k)$ for the velocity ${\mathbf u}$ and discontinuous elements of class $DQ(k)$ for the pressure $p$. For details about these spaces, we refer in particular to the book on mixed finite element methods by Brezzi and Fortin, but many other books on the theory of finite elements, for example the classic book by Brenner and Scott, also state the relevant results. In any case, with appropriate choices of function spaces, the discrete formulation reads as follows: Find ${\mathbf
-u}_h,p_h$ so that

      -\begin{eqnarray*}
+<p> Here, <picture><source srcset=${\mathbf n}$ is the outward normal vector at the boundary. Note how in this formulation, Dirichlet boundary values of the original problem are incorporated in the weak form.

      +

      To be well-posed, we have to look for solutions and test functions in the space $H({\textrm{div}})=\{{\mathbf w}\in L^2(\Omega)^d:\ {\textrm{div}}\ {\mathbf w}\in L^2\}$ for $\mathbf u$, $\mathbf v$, and $L^2$ for $p,q$. It is a well-known fact stated in almost every book on finite element theory that if one chooses discrete finite element spaces for the approximation of ${\mathbf u},p$ inappropriately, then the resulting discrete problem is unstable and the discrete solution will not converge to the exact solution. (Some details on the problem considered here – which falls in the class of "saddle-point problems" – can be found on the Wikipedia page on the Ladyzhenskaya-Babuska-Brezzi (LBB) condition.)

      +

      To overcome this, a number of different finite element pairs for ${\mathbf u},p$ have been developed that lead to a stable discrete problem. One such pair is to use the Raviart-Thomas spaces $RT(k)$ for the velocity ${\mathbf u}$ and discontinuous elements of class $DQ(k)$ for the pressure $p$. For details about these spaces, we refer in particular to the book on mixed finite element methods by Brezzi and Fortin, but many other books on the theory of finite elements, for example the classic book by Brenner and Scott, also state the relevant results. In any case, with appropriate choices of function spaces, the discrete formulation reads as follows: Find ${\mathbf
+u}_h,p_h$ so that

      +\begin{eqnarray*}
   A(\{{\mathbf u}_h,p_h\},\{{\mathbf v}_h,q_h\}) = F(\{{\mathbf v}_h,q_h\})
   \qquad\qquad \forall {\mathbf v}_h,q_h.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3099.png"/>

      -

      Before continuing, let us briefly pause and show that the choice of function spaces above provides us with the desired local conservation property. In particular, because the pressure space consists of discontinuous piecewise polynomials, we can choose the test function $q$ as the function that is equal to one on any given cell $K$ and zero everywhere else. If we also choose ${\mathbf v}=0$ everywhere (remember that the weak form above has to hold for all discrete test functions $q,v$), then putting these choices of test functions into the weak formulation above implies in particular that

Before continuing, let us briefly pause and show that the choice of function spaces above provides us with the desired local conservation property. In particular, because the pressure space consists of discontinuous piecewise polynomials, we can choose the test function $q$ as the function that is equal to one on any given cell $K$ and zero everywhere else. If we also choose ${\mathbf v}=0$ everywhere (remember that the weak form above has to hold for all discrete test functions $q,v$), then putting these choices of test functions into the weak formulation above implies in particular that

\begin{eqnarray*}
   - (1,{\textrm{div}}\ {\mathbf u}_h)_K
   =
   -(1,f)_K,
\end{eqnarray*}

      which we can of course write in more explicit form as

\begin{eqnarray*}
   \int_K {\textrm{div}}\ {\mathbf u}_h
   =
   \int_K f.
\end{eqnarray*}

      -

Applying the divergence theorem results in the fact that ${\mathbf u}_h$ has to satisfy, for every choice of cell $K$, the relationship

\begin{eqnarray*}
   \int_{\partial K} {\mathbf u}_h\cdot{\mathbf n}
   =
   \int_K f.
\end{eqnarray*}

      -

      If you now recall that ${\mathbf u}$ was the velocity, then the integral on the left is exactly the (discrete) flux across the boundary of the cell $K$. The statement is then that the flux must be equal to the integral over the sources within $K$. In particular, if there are no sources (i.e., $f=0$ in $K$), then the statement is that total flux is zero, i.e., whatever flows into a cell must flow out of it through some other part of the cell boundary. This is what we call local conservation because it holds for every cell.

      -

      On the other hand, the usual continuous $Q_k$ elements would not result in this kind of property when used for the pressure (as, for example, we do in step-43) because one can not choose a discrete test function $q_h$ that is one on a cell $K$ and zero everywhere else: It would be discontinuous and consequently not in the finite element space. (Strictly speaking, all we can say is that the proof above would not work for continuous elements. Whether these elements might still result in local conservation is a different question as one could think that a different kind of proof might still work; in reality, however, the property really does not hold.)

      +

      If you now recall that ${\mathbf u}$ was the velocity, then the integral on the left is exactly the (discrete) flux across the boundary of the cell $K$. The statement is then that the flux must be equal to the integral over the sources within $K$. In particular, if there are no sources (i.e., $f=0$ in $K$), then the statement is that total flux is zero, i.e., whatever flows into a cell must flow out of it through some other part of the cell boundary. This is what we call local conservation because it holds for every cell.

      +

      On the other hand, the usual continuous $Q_k$ elements would not result in this kind of property when used for the pressure (as, for example, we do in step-43) because one can not choose a discrete test function $q_h$ that is one on a cell $K$ and zero everywhere else: It would be discontinuous and consequently not in the finite element space. (Strictly speaking, all we can say is that the proof above would not work for continuous elements. Whether these elements might still result in local conservation is a different question as one could think that a different kind of proof might still work; in reality, however, the property really does not hold.)

      Assembling the linear system

      -

      The deal.II library (of course) implements Raviart-Thomas elements $RT(k)$ of arbitrary order $k$, as well as discontinuous elements $DG(k)$. If we forget about their particular properties for a second, we then have to solve a discrete problem

\begin{eqnarray*}
   A(x_h,w_h) = F(w_h),
\end{eqnarray*}

      -

      with the bilinear form and right hand side as stated above, and $x_h=\{{\mathbf u}_h,p_h\}$, $w_h=\{{\mathbf v}_h,q_h\}$. Both $x_h$ and $w_h$ are from the space $X_h=RT(k)\times DQ(k)$, where $RT(k)$ is itself a space of $dim$-dimensional functions to accommodate for the fact that the flow velocity is vector-valued. The necessary question then is: how do we do this in a program?

      -

      Vector-valued elements have already been discussed in previous tutorial programs, the first time and in detail in step-8. The main difference there was that the vector-valued space $V_h$ is uniform in all its components: the $dim$ components of the displacement vector are all equal and from the same function space. What we could therefore do was to build $V_h$ as the outer product of the $dim$ times the usual $Q(1)$ finite element space, and by this make sure that all our shape functions have only a single non-zero vector component. Instead of dealing with vector-valued shape functions, all we did in step-8 was therefore to look at the (scalar) only non-zero component and use the fe.system_to_component_index(i).first call to figure out which component this actually is.

      -

      This doesn't work with Raviart-Thomas elements: following from their construction to satisfy certain regularity properties of the space $H({\textrm{div}})$, the shape functions of $RT(k)$ are usually nonzero in all their vector components at once. For this reason, were fe.system_to_component_index(i).first applied to determine the only nonzero component of shape function $i$, an exception would be generated. What we really need to do is to get at all vector components of a shape function. In deal.II diction, we call such finite elements non-primitive, whereas finite elements that are either scalar or for which every vector-valued shape function is nonzero only in a single vector component are called primitive.

      +

      with the bilinear form and right hand side as stated above, and $x_h=\{{\mathbf u}_h,p_h\}$, $w_h=\{{\mathbf v}_h,q_h\}$. Both $x_h$ and $w_h$ are from the space $X_h=RT(k)\times DQ(k)$, where $RT(k)$ is itself a space of $dim$-dimensional functions to accommodate for the fact that the flow velocity is vector-valued. The necessary question then is: how do we do this in a program?

      +

      Vector-valued elements have already been discussed in previous tutorial programs, the first time and in detail in step-8. The main difference there was that the vector-valued space $V_h$ is uniform in all its components: the $dim$ components of the displacement vector are all equal and from the same function space. What we could therefore do was to build $V_h$ as the outer product of the $dim$ times the usual $Q(1)$ finite element space, and by this make sure that all our shape functions have only a single non-zero vector component. Instead of dealing with vector-valued shape functions, all we did in step-8 was therefore to look at the (scalar) only non-zero component and use the fe.system_to_component_index(i).first call to figure out which component this actually is.

      +

      This doesn't work with Raviart-Thomas elements: following from their construction to satisfy certain regularity properties of the space $H({\textrm{div}})$, the shape functions of $RT(k)$ are usually nonzero in all their vector components at once. For this reason, were fe.system_to_component_index(i).first applied to determine the only nonzero component of shape function $i$, an exception would be generated. What we really need to do is to get at all vector components of a shape function. In deal.II diction, we call such finite elements non-primitive, whereas finite elements that are either scalar or for which every vector-valued shape function is nonzero only in a single vector component are called primitive.

      So what do we have to do for non-primitive elements? To figure this out, let us go back in the tutorial programs, almost to the very beginnings. There, we learned that we use the FEValues class to determine the values and gradients of shape functions at quadrature points. For example, we would call fe_values.shape_value(i,q_point) to obtain the value of the ith shape function on the quadrature point with number q_point. Later, in step-8 and other tutorial programs, we learned that this function call also works for vector-valued shape functions (of primitive finite elements), and that it returned the value of the only non-zero component of shape function i at quadrature point q_point.

      -

      For non-primitive shape functions, this is clearly not going to work: there is no single non-zero vector component of shape function i, and the call to fe_values.shape_value(i,q_point) would consequently not make much sense. However, deal.II offers a second function call, fe_values.shape_value_component(i,q_point,comp) that returns the value of the compth vector component of shape function i at quadrature point q_point, where comp is an index between zero and the number of vector components of the present finite element; for example, the element we will use to describe velocities and pressures is going to have $dim+1$ components. It is worth noting that this function call can also be used for primitive shape functions: it will simply return zero for all components except one; for non-primitive shape functions, it will in general return a non-zero value for more than just one component.

      -

      We could now attempt to rewrite the bilinear form above in terms of vector components. For example, in 2d, the first term could be rewritten like this (note that $u_0=x_0, u_1=x_1, p=x_2$):

For non-primitive shape functions, this is clearly not going to work: there is no single non-zero vector component of shape function i, and the call to fe_values.shape_value(i,q_point) would consequently not make much sense. However, deal.II offers a second function call, fe_values.shape_value_component(i,q_point,comp) that returns the value of the compth vector component of shape function i at quadrature point q_point, where comp is an index between zero and the number of vector components of the present finite element; for example, the element we will use to describe velocities and pressures is going to have $dim+1$ components. It is worth noting that this function call can also be used for primitive shape functions: it will simply return zero for all components except one; for non-primitive shape functions, it will in general return a non-zero value for more than just one component.

      +

      We could now attempt to rewrite the bilinear form above in terms of vector components. For example, in 2d, the first term could be rewritten like this (note that $u_0=x_0, u_1=x_1, p=x_2$):

      +\begin{eqnarray*}
   ({\mathbf u}_h^i, K^{-1}{\mathbf u}_h^j)
   =
   &\left((x_h^i)_0, K^{-1}_{00} (x_h^j)_0\right) +
    \left((x_h^i)_0, K^{-1}_{01} (x_h^j)_1\right) + \\
   &\left((x_h^i)_1, K^{-1}_{10} (x_h^j)_0\right) +
    \left((x_h^i)_1, K^{-1}_{11} (x_h^j)_1\right).
\end{eqnarray*}

      If we implemented this, we would get code like this:

      for (unsigned int q=0; q<n_q_points; ++q)
      @@ -263,7 +263,7 @@
      fe_values.shape_value_component(j,q,1)
      ) *
      fe_values.JxW(q);
      -

      This is, at best, tedious, error prone, and not dimension independent. There are obvious ways to make things dimension independent, but in the end, the code is simply not pretty. What would be much nicer is if we could simply extract the ${\mathbf u}$ and $p$ components of a shape function $x_h^i$. In the program we do that in the following way:

      +

      This is, at best, tedious, error prone, and not dimension independent. There are obvious ways to make things dimension independent, but in the end, the code is simply not pretty. What would be much nicer is if we could simply extract the ${\mathbf u}$ and $p$ components of a shape function $x_h^i$. In the program we do that in the following way:

      This is, in fact, not only the first term of the bilinear form, but the whole thing (sans boundary contributions).

      -

      What this piece of code does is, given an fe_values object, to extract the values of the first $dim$ components of shape function i at quadrature points q, that is the velocity components of that shape function. Put differently, if we write shape functions $x_h^i$ as the tuple $\{{\mathbf u}_h^i,p_h^i\}$, then the function returns the velocity part of this tuple. Note that the velocity is of course a dim-dimensional tensor, and that the function returns a corresponding object. Similarly, where we subscript with the pressure extractor, we extract the scalar pressure component. The whole mechanism is described in more detail in the Handling vector valued problems module.

      +

      What this piece of code does is, given an fe_values object, to extract the values of the first $dim$ components of shape function i at quadrature points q, that is the velocity components of that shape function. Put differently, if we write shape functions $x_h^i$ as the tuple $\{{\mathbf u}_h^i,p_h^i\}$, then the function returns the velocity part of this tuple. Note that the velocity is of course a dim-dimensional tensor, and that the function returns a corresponding object. Similarly, where we subscript with the pressure extractor, we extract the scalar pressure component. The whole mechanism is described in more detail in the Handling vector valued problems module.

      In practice, it turns out that we can do a bit better if we evaluate the shape functions, their gradients and divergences only once per outermost loop, and store the result, as this saves us a few otherwise repeated computations (it is possible to save even more repeated operations by calculating all relevant quantities in advance and then only inserting the results in the actual loop, see step-22 for a realization of that approach). The final result then looks like this, working in every space dimension:

      for (const auto &cell : dof_handler.active_cell_iterators())
      {
      @@ -323,7 +323,7 @@
      }

      This very closely resembles the form in which we have originally written down the bilinear form and right hand side.

      -

      There is one final term that we have to take care of: the right hand side contained the term $(g,{\mathbf v}\cdot {\mathbf n})_{\partial\Omega}$, constituting the weak enforcement of pressure boundary conditions. We have already seen in step-7 how to deal with face integrals: essentially exactly the same as with domain integrals, except that we have to use the FEFaceValues class instead of FEValues. To compute the boundary term we then simply have to loop over all boundary faces and integrate there. The mechanism works in the same way as above, i.e. the extractor classes also work on FEFaceValues objects:

      +

      There is one final term that we have to take care of: the right hand side contained the term $(g,{\mathbf v}\cdot {\mathbf n})_{\partial\Omega}$, constituting the weak enforcement of pressure boundary conditions. We have already seen in step-7 how to deal with face integrals: essentially exactly the same as with domain integrals, except that we have to use the FEFaceValues class instead of FEValues. To compute the boundary term we then simply have to loop over all boundary faces and integrate there. The mechanism works in the same way as above, i.e. the extractor classes also work on FEFaceValues objects:

      for (const auto &face : cell->face_iterators())
      if (face->at_boundary())
      {
      @@ -341,15 +341,15 @@

      You will find the exact same code as above in the sources for the present program. We will therefore not comment much on it below.

      Linear solvers and preconditioners

      After assembling the linear system we are faced with the task of solving it. The problem here is that the matrix possesses two undesirable properties:

        -
      • It is indefinite, i.e., it has both positive and negative eigenvalues. We don't want to prove this property here, but note that this is true for all matrices of the form $\left(\begin{array}{cc} M & B \\ B^T & 0 \end{array}\right)$ such as the one here where $M$ is positive definite.
      • -
      • The matrix has a zero block at the bottom right (there is no term in the bilinear form that couples the pressure $p$ with the pressure test function $q$).
      • +
      • It is indefinite, i.e., it has both positive and negative eigenvalues. We don't want to prove this property here, but note that this is true for all matrices of the form $\left(\begin{array}{cc} M & B \\ B^T & 0 \end{array}\right)$ such as the one here where $M$ is positive definite.
      • +
      • The matrix has a zero block at the bottom right (there is no term in the bilinear form that couples the pressure $p$ with the pressure test function $q$).

      At least it is symmetric, but the first issue above still means that the Conjugate Gradient method is not going to work since it is only applicable to problems in which the matrix is symmetric and positive definite. We would have to resort to other iterative solvers instead, such as MinRes, SymmLQ, or GMRES, that can deal with indefinite systems. However, then the next problem immediately surfaces: Due to the zero block, there are zeros on the diagonal and none of the usual, "simple" preconditioners (Jacobi, SSOR) will work as they require division by diagonal elements.

      For the matrix sizes we expect to run with this program, the by far simplest approach would be to just use a direct solver (in particular, the SparseDirectUMFPACK class that is bundled with deal.II). step-29 goes this route and shows that solving any linear system can be done in just 3 or 4 lines of code.

      But then, this is a tutorial: We teach how to do things. Consequently, in the following, we will introduce some techniques that can be used in cases like these. Namely, we will consider the linear system as not consisting of one large matrix and vectors, but we will want to decompose matrices into blocks that correspond to the individual operators that appear in the system. We note that the resulting solver is not optimal – there are much better ways to efficiently compute the system, for example those explained in the results section of step-22 or the one we use in step-43 for a problem similar to the current one. Here, our goal is simply to introduce new solution techniques and how they can be implemented in deal.II.

      Solving using the Schur complement

      In view of the difficulties using standard solvers and preconditioners mentioned above, let us take another look at the matrix. If we sort our degrees of freedom so that all velocity come before all pressure variables, then we can subdivide the linear system $Ax=b$ into the following blocks:

\begin{eqnarray*}
   \left(\begin{array}{cc}
     M & B \\ B^T & 0
   \end{array}\right)
@@ -360,26 +360,26 @@
   \left(\begin{array}{cc}
     F \\ G
   \end{array}\right),
\end{eqnarray*}

      -

      where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B^T$ corresponds to the negative divergence operator, and $B$ is its transpose and corresponds to the gradient.

      +

      where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B^T$ corresponds to the negative divergence operator, and $B$ is its transpose and corresponds to the gradient.

      By block elimination, we can then re-order this system in the following way (multiply the first row of the system by $B^TM^{-1}$ and then subtract the second row from it):

\begin{eqnarray*}
   B^TM^{-1}B P &=& B^TM^{-1} F - G, \\
/usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html	2024-03-17 21:57:44.423240351 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html	2024-03-17 21:57:44.423240351 +0000
@@ -155,113 +155,113 @@
 <p>The equations covered here are an extension of the material already covered in <a class=step-20. In particular, they fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems module.

      The two phase flow problem

      Modeling of two phase flow in porous media is important for both environmental remediation and the management of petroleum and groundwater reservoirs. Practical situations involving two phase flow include the dispersal of a nonaqueous phase liquid in an aquifer, or the joint movement of a mixture of fluids such as oil and water in a reservoir. Simulation models, if they are to provide realistic predictions, must accurately account for these effects.

      -

      To derive the governing equations, consider two phase flow in a reservoir $\Omega$ under the assumption that the movement of fluids is dominated by viscous effects; i.e. we neglect the effects of gravity, compressibility, and capillary pressure. Porosity will be considered to be constant. We will denote variables referring to either of the two phases using subscripts $w$ and $o$, short for water and oil. The derivation of the equations holds for other pairs of fluids as well, however.

      +

      To derive the governing equations, consider two phase flow in a reservoir $\Omega$ under the assumption that the movement of fluids is dominated by viscous effects; i.e. we neglect the effects of gravity, compressibility, and capillary pressure. Porosity will be considered to be constant. We will denote variables referring to either of the two phases using subscripts $w$ and $o$, short for water and oil. The derivation of the equations holds for other pairs of fluids as well, however.

      The velocity with which molecules of each of the two phases move is determined by Darcy's law that states that the velocity is proportional to the pressure gradient:

\begin{eqnarray*}
   \mathbf{u}_{j}
   =
   -\frac{k_{rj}(S)}{\mu_{j}} \mathbf{K} \cdot \nabla p
\end{eqnarray*}

      -

      where $\mathbf{u}_{j}$ is the velocity of phase $j=o,w$, $K$ is the permeability tensor, $k_{rj}$ is the relative permeability of phase $j$, $p$ is the pressure and $\mu_{j}$ is the viscosity of phase $j$. Finally, $S$ is the saturation (volume fraction), i.e. a function with values between 0 and 1 indicating the composition of the mixture of fluids. In general, the coefficients $K, k_{rj}, \mu$ may be spatially dependent variables, and we will always treat them as non-constant functions in the following.

      +

      where $\mathbf{u}_{j}$ is the velocity of phase $j=o,w$, $K$ is the permeability tensor, $k_{rj}$ is the relative permeability of phase $j$, $p$ is the pressure and $\mu_{j}$ is the viscosity of phase $j$. Finally, $S$ is the saturation (volume fraction), i.e. a function with values between 0 and 1 indicating the composition of the mixture of fluids. In general, the coefficients $K, k_{rj}, \mu$ may be spatially dependent variables, and we will always treat them as non-constant functions in the following.

      We combine Darcy's law with the statement of conservation of mass for each phase,

\[
   \textrm{div}\ \mathbf{u}_{j} = q_j,
\]

      with a source term for each phase. By summing over the two phases, we can express the governing equations in terms of the so-called pressure equation:

\begin{eqnarray*}
 - \nabla \cdot (\mathbf{K}\lambda(S) \nabla p)= q.
\end{eqnarray*}

      -

      Here, $q$ is the sum source term, and

\[
   \lambda(S) = \frac{k_{rw}(S)}{\mu_{w}}+\frac{k_{ro}(S)}{\mu_{o}}
\]

      is the total mobility.

      So far, this looks like an ordinary stationary, Poisson-like equation that we can solve right away with the techniques of the first few tutorial programs (take a look at step-6, for example, for something very similar). However, we have not said anything yet about the saturation, which of course is going to change as the fluids move around.

      The second part of the equations is the description of the dynamics of the saturation, i.e., how the relative concentration of the two fluids changes with time. The saturation equation for the displacing fluid (water) is given by the following conservation law:

\begin{eqnarray*}
   S_{t} + \nabla \cdot (F(S) \mathbf{u}) = q_{w},
\end{eqnarray*}

      which can be rewritten by using the product rule of the divergence operator in the previous equation:

\begin{eqnarray*}
   S_{t} + F(S) \left[\nabla \cdot \mathbf{u}\right]
         + \mathbf{u} \cdot \left[ \nabla F(S)\right]
   = S_{t} + F(S) q + \mathbf{u} \cdot \nabla F(S) = q_{w}.
\end{eqnarray*}

      -

      Here, $q=\nabla\cdot \mathbf{u}$ is the total influx introduced above, and $q_{w}$ is the flow rate of the displacing fluid (water). These two are related to the fractional flow $F(S)$ in the following way:

\[
   q_{w} = F(S) q,
\]

      where the fractional flow is often parameterized via the (heuristic) expression

\[
   F(S)
   =
   \frac{k_{rw}(S)/\mu_{w}}{k_{rw}(S)/\mu_{w} + k_{ro}(S)/\mu_{o}}.
\]

      Putting it all together yields the saturation equation in the following, advected form:

\begin{eqnarray*}
   S_{t} + \mathbf{u} \cdot \nabla F(S) = 0,
\end{eqnarray*}

      where $\mathbf u$ is the total velocity

\[
   \mathbf{u} =
   \mathbf{u}_{o} + \mathbf{u}_{w} = -\lambda(S) \mathbf{K}\cdot\nabla p.
\]

      -

Note that the advection equation contains the term $\mathbf{u} \cdot \nabla F(S)$ rather than $\mathbf{u} \cdot \nabla S$ to indicate that the saturation is not simply transported along; rather, since the two phases move with different velocities, the saturation can actually change even in the advected coordinate system. To see this, rewrite $\mathbf{u} \cdot \nabla F(S) = \mathbf{u} F'(S) \cdot \nabla S$ to observe that the actual velocity with which the phase with saturation $S$ is transported is $\mathbf u F'(S)$ whereas the other phase is transported at velocity $\mathbf u (1-F'(S))$. $F(S)$ is consequently often referred to as the fractional flow.

      +

      Note that the advection equation contains the term $\mathbf{u} \cdot \nabla
+F(S)$ rather than $\mathbf{u} \cdot \nabla S$ to indicate that the saturation is not simply transported along; rather, since the two phases move with different velocities, the saturation can actually change even in the advected coordinate system. To see this, rewrite $\mathbf{u} \cdot \nabla F(S)
+= \mathbf{u} F'(S) \cdot \nabla S$ to observe that the actual velocity with which the phase with saturation $S$ is transported is $\mathbf u F'(S)$ whereas the other phase is transported at velocity $\mathbf u (1-F'(S))$. $F(S)$ is consequently often referred to as the fractional flow.

      In summary, what we get are the following two equations:

\begin{eqnarray*}
   - \nabla \cdot (\mathbf{K}\lambda(S) \nabla p) &=& q
   \qquad \textrm{in}\ \Omega\times[0,T],
   \\
   S_{t} + \mathbf{u} \cdot \nabla F(S) &=& 0
   \qquad \textrm{in}\ \Omega\times[0,T].
\end{eqnarray*}

      -

      Here, $p=p(\mathbf x, t), S=S(\mathbf x, t)$ are now time dependent functions: while at every time instant the flow field is in equilibrium with the pressure (i.e. we neglect dynamic accelerations), the saturation is transported along with the flow and therefore changes over time, in turn affected the flow field again through the dependence of the first equation on $S$.

      +

      Here, $p=p(\mathbf x, t), S=S(\mathbf x, t)$ are now time dependent functions: while at every time instant the flow field is in equilibrium with the pressure (i.e. we neglect dynamic accelerations), the saturation is transported along with the flow and therefore changes over time, in turn affected the flow field again through the dependence of the first equation on $S$.

      This set of equations has a peculiar character: one of the two equations has a time derivative, the other one doesn't. This corresponds to the character that the pressure and velocities are coupled through an instantaneous constraint, whereas the saturation evolves over finite time scales.

      -

      Such systems of equations are called Differential Algebraic Equations (DAEs), since one of the equations is a differential equation, the other is not (at least not with respect to the time variable) and is therefore an "algebraic" equation. (The notation comes from the field of ordinary differential equations, where everything that does not have derivatives with respect to the time variable is necessarily an algebraic equation.) This class of equations contains pretty well-known cases: for example, the time dependent Stokes and Navier-Stokes equations (where the algebraic constraint is that the divergence of the flow field, $\textrm{div}\ \mathbf u$, must be zero) as well as the time dependent Maxwell equations (here, the algebraic constraint is that the divergence of the electric displacement field equals the charge density, $\textrm{div}\ \mathbf D = \rho$ and that the divergence of the magnetic flux density is zero: $\textrm{div}\ \mathbf
-B = 0$); even the quasistatic model of step-18 falls into this category. We will see that the different character of the two equations will inform our discretization strategy for the two equations.

      +

      Such systems of equations are called Differential Algebraic Equations (DAEs), since one of the equations is a differential equation, the other is not (at least not with respect to the time variable) and is therefore an "algebraic" equation. (The notation comes from the field of ordinary differential equations, where everything that does not have derivatives with respect to the time variable is necessarily an algebraic equation.) This class of equations contains pretty well-known cases: for example, the time dependent Stokes and Navier-Stokes equations (where the algebraic constraint is that the divergence of the flow field, $\textrm{div}\ \mathbf u$, must be zero) as well as the time dependent Maxwell equations (here, the algebraic constraint is that the divergence of the electric displacement field equals the charge density, $\textrm{div}\ \mathbf D = \rho$ and that the divergence of the magnetic flux density is zero: $\textrm{div}\ \mathbf
+B = 0$); even the quasistatic model of step-18 falls into this category. We will see that the different character of the two equations will inform our discretization strategy for the two equations.

      Time discretization

      In the reservoir simulation community, it is common to solve the equations derived above by going back to the first order, mixed formulation. To this end, we re-introduce the total velocity $\mathbf u$ and write the equations in the following form:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \mathbf{u}+\mathbf{K}\lambda(S) \nabla p&=&0 \\
   \nabla \cdot\mathbf{u} &=& q \\
   S_{t} + \mathbf{u} \cdot \nabla F(S) &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3184.png"/>

      This formulation has the additional benefit that we do not have to express the total velocity $\mathbf u$ appearing in the transport equation as a function of the pressure, but can rather take the primary variable for it. Given the saddle point structure of the first two equations and their similarity to the mixed Laplace formulation we have introduced in step-20, it will come as no surprise that we will use a mixed discretization again.

      But let's postpone this for a moment. The first business we have with these equations is to think about the time discretization. In reservoir simulation, there is a rather standard algorithm that we will use here. It first solves the pressure using an implicit equation, then the saturation using an explicit time stepping scheme. The algorithm is called IMPES for IMplicit Pressure Explicit Saturation and was first proposed a long time ago: by Sheldon et al. in 1959 and Stone and Gardner in 1961 (J. W. Sheldon, B. Zondek and W. T. Cardwell: One-dimensional, incompressible, non-capillary, two-phase fluid flow in a porous medium, Trans. SPE AIME, 216 (1959), pp. 290-296; H. L. Stone and A. O. Gardner Jr: Analysis of gas-cap or dissolved-gas reservoirs, Trans. SPE AIME, 222 (1961), pp. 92-104). In a slightly modified form, this algorithm can be written as follows: for each time step, solve

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \mathbf{u}^{n+1}+\mathbf{K}\lambda(S^n) \nabla p^{n+1}&=&0 \\
   \nabla \cdot\mathbf{u}^{n+1} &=& q^{n+1} \\
   \frac {S^{n+1}-S^n}{\triangle t} + \mathbf{u}^{n+1} \cdot \nabla F(S^n) &=& 0,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3185.png"/>

      -

      where $\triangle t$ is the length of a time step. Note how we solve the implicit pressure-velocity system that only depends on the previously computed saturation $S^n$, and then do an explicit time step for $S^{n+1}$ that only depends on the previously known $S^n$ and the just computed $\mathbf{u}^{n+1}$. This way, we never have to iterate for the nonlinearities of the system as we would have if we used a fully implicit method. (In a more modern perspective, this should be seen as an "operator +

      where $\triangle t$ is the length of a time step. Note how we solve the implicit pressure-velocity system that only depends on the previously computed saturation $S^n$, and then do an explicit time step for $S^{n+1}$ that only depends on the previously known $S^n$ and the just computed $\mathbf{u}^{n+1}$. This way, we never have to iterate for the nonlinearities of the system as we would have if we used a fully implicit method. (In a more modern perspective, this should be seen as an "operator splitting" method. step-58 has a long description of the idea behind this.)

      -

      We can then state the problem in weak form as follows, by multiplying each equation with test functions $\mathbf v$, $\phi$, and $\sigma$ and integrating terms by parts:

      -\begin{eqnarray*}
+<p>We can then state the problem in weak form as follows, by multiplying each equation with test functions <picture><source srcset=$\mathbf v$, $\phi$, and $\sigma$ and integrating terms by parts:

      +\begin{eqnarray*}
   \left((\mathbf{K}\lambda(S^n))^{-1} \mathbf{u}^{n+1},\mathbf v\right)_\Omega -
   (p^{n+1}, \nabla\cdot\mathbf v)_\Omega &=&
   - (p^{n+1}, \mathbf v)_{\partial\Omega}
   \\
   (\nabla \cdot\mathbf{u}^{n+1}, \phi)_\Omega &=& (q^{n+1},\phi)_\Omega
-\end{eqnarray*} +\end{eqnarray*}" src="form_3190.png"/>

      -

      Note that in the first term, we have to prescribe the pressure $p^{n+1}$ on the boundary $\partial\Omega$ as boundary values for our problem. $\mathbf n$ denotes the unit outward normal vector to $\partial K$, as usual.

      +

      Note that in the first term, we have to prescribe the pressure $p^{n+1}$ on the boundary $\partial\Omega$ as boundary values for our problem. $\mathbf n$ denotes the unit outward normal vector to $\partial K$, as usual.

      For the saturation equation, we obtain after integrating by parts

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (S^{n+1}, \sigma)_\Omega
   -
   \triangle t
@@ -273,10 +273,10 @@
   \right\}
   &=&
   (S^n,\sigma)_\Omega.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3194.png"/>

      -

      Using the fact that $\nabla \cdot \mathbf{u}^{n+1}=q^{n+1}$, we can rewrite the cell term to get an equation as follows:

      -\begin{eqnarray*}
+<p> Using the fact that <picture><source srcset=$\nabla \cdot \mathbf{u}^{n+1}=q^{n+1}$, we can rewrite the cell term to get an equation as follows:

      +\begin{eqnarray*}
   (S^{n+1}, \sigma)_\Omega
   -
   \triangle t
@@ -289,26 +289,26 @@
   &=&
   (S^n,\sigma)_\Omega +
   \triangle t \sum_K  \left(F(S^n) q^{n+1}, \sigma\right)_K.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3196.png"/>

      We introduce an object of type DiscreteTime in order to keep track of the current value of time and time step in the code. This class encapsulates many complexities regarding adjusting time step size and stopping at a specified final time.

      Space discretization

      -

      In each time step, we then apply the mixed finite method of step-20 to the velocity and pressure. To be well-posed, we choose Raviart-Thomas spaces $RT_{k}$ for $\mathbf{u}$ and discontinuous elements of class $DGQ_{k}$ for $p$. For the saturation, we will also choose $DGQ_{k}$ spaces.

      +

      In each time step, we then apply the mixed finite method of step-20 to the velocity and pressure. To be well-posed, we choose Raviart-Thomas spaces $RT_{k}$ for $\mathbf{u}$ and discontinuous elements of class $DGQ_{k}$ for $p$. For the saturation, we will also choose $DGQ_{k}$ spaces.

      Since we have discontinuous spaces, we have to think about how to evaluate terms on the interfaces between cells, since discontinuous functions are not really defined there. In particular, we have to give a meaning to the last term on the left hand side of the saturation equation. To this end, let us define that we want to evaluate it in the following sense:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   &&\left(F(S^n) (\mathbf n \cdot \mathbf{u}^{n+1}), \sigma\right)_{\partial K}
   \\
   &&\qquad =
   \left(F(S^n_+) (\mathbf n \cdot \mathbf{u}^{n+1}_+), \sigma\right)_{\partial K_+}
   +
   \left(F(S^n_-) (\mathbf n \cdot \mathbf{u}^{n+1}_-), \sigma\right)_{\partial K_-},
-\end{eqnarray*} +\end{eqnarray*}" src="form_3199.png"/>

      -

      where $\partial K_{-} \dealcoloneq \{x\in \partial K, \mathbf{u}(x) \cdot \mathbf{n}<0\}$ denotes the inflow boundary and $\partial K_{+} \dealcoloneq \{\partial K \setminus
-\partial K_{-}\}$ is the outflow part of the boundary. The quantities $S_+,\mathbf{u}_+$ then correspond to the values of these variables on the present cell, whereas $S_-,\mathbf{u}_-$ (needed on the inflow part of the boundary of $K$) are quantities taken from the neighboring cell. Some more context on discontinuous element techniques and evaluation of fluxes can also be found in step-12 and step-12b.

      +

      where $\partial K_{-} \dealcoloneq \{x\in \partial K, \mathbf{u}(x) \cdot \mathbf{n}<0\}$ denotes the inflow boundary and $\partial K_{+} \dealcoloneq \{\partial K \setminus
+\partial K_{-}\}$ is the outflow part of the boundary. The quantities $S_+,\mathbf{u}_+$ then correspond to the values of these variables on the present cell, whereas $S_-,\mathbf{u}_-$ (needed on the inflow part of the boundary of $K$) are quantities taken from the neighboring cell. Some more context on discontinuous element techniques and evaluation of fluxes can also be found in step-12 and step-12b.

      Linear solvers

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html 2024-03-17 21:57:44.519240944 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html 2024-03-17 21:57:44.523240969 +0000 @@ -167,36 +167,36 @@ This material is based upon work partly supported by the National Science Foundation under Award No. EAR-0426271 and The California Institute of Technology. Any opinions, findings, and conclusions or recommendations expressed in this publication are those of the author and do not necessarily reflect the views of the National Science Foundation or of The California Institute of Technology.

      Introduction

      This program deals with the Stokes system of equations which reads as follows in non-dimensionalized form:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   -2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p &=& \textbf{f},
   \\
   -\textrm{div}\; \textbf{u} &=& 0,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3249.png"/>

      -

      where $\textbf u$ denotes the velocity of a fluid, $p$ is its pressure, $\textbf f$ are external forces, and $\varepsilon(\textbf{u})= \nabla^s{\textbf{u}}= \frac 12 \left[
-(\nabla \textbf{u}) + (\nabla \textbf{u})^T\right]$ is the rank-2 tensor of symmetrized gradients; a component-wise definition of it is $\varepsilon(\textbf{u})_{ij}=\frac
-12\left(\frac{\partial u_i}{\partial x_j} + \frac{\partial u_j}{\partial x_i}\right)$.

      +

      where $\textbf u$ denotes the velocity of a fluid, $p$ is its pressure, $\textbf f$ are external forces, and $\varepsilon(\textbf{u})= \nabla^s{\textbf{u}}= \frac 12 \left[
+(\nabla \textbf{u}) + (\nabla \textbf{u})^T\right]$ is the rank-2 tensor of symmetrized gradients; a component-wise definition of it is $\varepsilon(\textbf{u})_{ij}=\frac
+12\left(\frac{\partial u_i}{\partial x_j} + \frac{\partial u_j}{\partial x_i}\right)$.

      The Stokes equations describe the steady-state motion of a slow-moving, viscous fluid such as honey, rocks in the earth mantle, or other cases where inertia does not play a significant role. If a fluid is moving fast enough that inertia forces are significant compared to viscous friction, the Stokes equations are no longer valid; taking into account inertia effects then leads to the nonlinear Navier-Stokes equations. However, in this tutorial program, we will focus on the simpler Stokes system.

      Note that when deriving the more general compressible Navier-Stokes equations, the diffusion is modeled as the divergence of the stress tensor

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \tau = - \mu \left(2\varepsilon(\textbf{u}) - \frac{2}{3}\nabla \cdot \textbf{u} I\right),
-\end{eqnarray*} +\end{eqnarray*}" src="form_3253.png"/>

      -

      where $\mu$ is the viscosity of the fluid. With the assumption of $\mu=1$ (assume constant viscosity and non-dimensionalize the equation by dividing out $\mu$) and assuming incompressibility ( $\textrm{div}\; \textbf{u}=0$), we arrive at the formulation from above:

      -\begin{eqnarray*}
+<p> where <picture><source srcset=$\mu$ is the viscosity of the fluid. With the assumption of $\mu=1$ (assume constant viscosity and non-dimensionalize the equation by dividing out $\mu$) and assuming incompressibility ( $\textrm{div}\; \textbf{u}=0$), we arrive at the formulation from above:

      +\begin{eqnarray*}
   \textrm{div}\; \tau = -2\textrm{div}\;\varepsilon(\textbf{u}).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3256.png"/>

      -

      A different formulation uses the Laplace operator ( $-\triangle \textbf{u}$) instead of the symmetrized gradient. A big difference here is that the different components of the velocity do not couple. If you assume additional regularity of the solution $\textbf{u}$ (second partial derivatives exist and are continuous), the formulations are equivalent:

      -\begin{eqnarray*}
+<p> A different formulation uses the Laplace operator ( <picture><source srcset=$-\triangle \textbf{u}$) instead of the symmetrized gradient. A big difference here is that the different components of the velocity do not couple. If you assume additional regularity of the solution $\textbf{u}$ (second partial derivatives exist and are continuous), the formulations are equivalent:

      +\begin{eqnarray*}
   \textrm{div}\; \tau
   = -2\textrm{div}\;\varepsilon(\textbf{u})
   = -\triangle \textbf{u} - \nabla \cdot (\nabla\textbf{u})^T
   = -\triangle \textbf{u}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3259.png"/>

      -

      This is because the $i$th entry of $\nabla \cdot (\nabla\textbf{u})^T$ is given by:

      -\begin{eqnarray*}
+<p> This is because the <picture><source srcset=$i$th entry of $\nabla \cdot (\nabla\textbf{u})^T$ is given by:

      +\begin{eqnarray*}
 [\nabla \cdot (\nabla\textbf{u})^T]_i
 = \sum_j \frac{\partial}{\partial x_j} [(\nabla\textbf{u})^T]_{i,j}
 = \sum_j \frac{\partial}{\partial x_j} [(\nabla\textbf{u})]_{j,i}
@@ -205,14 +205,14 @@
 = \frac{\partial}{\partial x_i}
   \underbrace{\textrm{div}\; \textbf{u}}_{=0}
 = 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3261.png"/>

      If you can not assume the above mentioned regularity, or if your viscosity is not a constant, the equivalence no longer holds. Therefore, we decided to stick with the more physically accurate symmetric tensor formulation in this tutorial.

      To be well-posed, we will have to add boundary conditions to the equations. What boundary conditions are readily possible here will become clear once we discuss the weak form of the equations.

      The equations covered here fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems module.

      Weak form

      The weak form of the equations is obtained by writing it in vector form as

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \begin{pmatrix}
     {-2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p}
     \\
@@ -224,23 +224,23 @@
   \\
   0
   \end{pmatrix},
-\end{eqnarray*} +\end{eqnarray*}" src="form_3262.png"/>

      -

      forming the dot product from the left with a vector-valued test function $\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$ and integrating over the domain $\Omega$, yielding the following set of equations:

      -\begin{eqnarray*}
+<p> forming the dot product from the left with a vector-valued test function <picture><source srcset=$\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$ and integrating over the domain $\Omega$, yielding the following set of equations:

      +\begin{eqnarray*}
   (\mathrm v,
    -2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p)_{\Omega}
   -
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3264.png"/>

      -

      which has to hold for all test functions $\phi = \begin{pmatrix}\textbf{v}
-\\ q\end{pmatrix}$.

      +

      which has to hold for all test functions $\phi = \begin{pmatrix}\textbf{v}
+\\ q\end{pmatrix}$.

      A generally good rule of thumb is that if one can reduce how many derivatives are taken on any variable in the formulation, then one should in fact do that using integration by parts. (This is motivated by the theory of partial differential equations, and in particular the difference between strong and weak solutions.) We have already done that for the Laplace equation, where we have integrated the second derivative by parts to obtain the weak formulation that has only one derivative on both test and trial function.

      In the current context, we integrate by parts the second term:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (\textbf{v}, -2\; \textrm{div}\; \varepsilon(\textbf{u}))_{\Omega}
   - (\textrm{div}\; \textbf{v}, p)_{\Omega}
   + (\textbf{n}\cdot\textbf{v}, p)_{\partial\Omega}
@@ -248,10 +248,10 @@
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3266.png"/>

      Likewise, we integrate by parts the first term to obtain

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (\nabla \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\Omega}
   -
   (\textbf{n} \otimes \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\partial\Omega}
@@ -261,19 +261,19 @@
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3267.png"/>

      where the scalar product between two tensor-valued quantities is here defined as

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (\nabla \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\Omega}
   =
   2 \int_\Omega \sum_{i,j=1}^d \frac{\partial v_j}{\partial x_i}
   \varepsilon(\textbf{u})_{ij} \ dx.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3268.png"/>

      -

      Using this, we have now reduced the requirements on our variables to first derivatives for $\mathbf u,\mathbf v$ and no derivatives at all for $p,q$.

      -

      Because the scalar product between a general tensor like $\nabla\textbf{v}$ and a symmetric tensor like $\varepsilon(\textbf{u})$ equals the scalar product between the symmetrized forms of the two, we can also write the bilinear form above as follows:

      -\begin{eqnarray*}
+<p> Using this, we have now reduced the requirements on our variables to first derivatives for <picture><source srcset=$\mathbf u,\mathbf v$ and no derivatives at all for $p,q$.

      +

      Because the scalar product between a general tensor like $\nabla\textbf{v}$ and a symmetric tensor like $\varepsilon(\textbf{u})$ equals the scalar product between the symmetrized forms of the two, we can also write the bilinear form above as follows:

      +\begin{eqnarray*}
   (\varepsilon(\textbf{v}), 2\; \varepsilon(\textbf{u}))_{\Omega}
   -
   (\textbf{n} \otimes \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\partial\Omega}
@@ -283,43 +283,43 @@
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3272.png"/>

      We will deal with the boundary terms in the next section, but it is already clear from the domain terms

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (\varepsilon(\textbf{v}), 2\; \varepsilon(\textbf{u}))_{\Omega}
   - (\textrm{div}\; \textbf{v}, p)_{\Omega}
   -
   (q,\textrm{div}\; \textbf{u})_{\Omega}
-\end{eqnarray*} +\end{eqnarray*}" src="form_3273.png"/>

      of the bilinear form that the Stokes equations yield a symmetric bilinear form, and consequently a symmetric (if indefinite) system matrix.

      Boundary conditions

      Note
      The material presented here is also discussed in video lecture 21.5. (All video lectures are also available here.) (See also video lecture 21.55, video lecture 21.6, video lecture 21.65.)

      The weak form just derived immediately presents us with different possibilities for imposing boundary conditions:

      1. -

        Dirichlet velocity boundary conditions: On a part $\Gamma_D\subset\partial\Omega$ we may impose Dirichlet conditions on the velocity $\textbf u$:

        +

        Dirichlet velocity boundary conditions: On a part $\Gamma_D\subset\partial\Omega$ we may impose Dirichlet conditions on the velocity $\textbf u$:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
         \textbf u = \textbf g_D \qquad\qquad \textrm{on}\ \Gamma_D.
-    \end{eqnarray*} + \end{eqnarray*}" src="form_3275.png"/>

        -

        Because test functions $\textbf{v}$ come from the tangent space of the solution variable, we have that $\textbf{v}=0$ on $\Gamma_D$ and consequently that

        -\begin{eqnarray*}
+<p> Because test functions <picture><source srcset=$\textbf{v}$ come from the tangent space of the solution variable, we have that $\textbf{v}=0$ on $\Gamma_D$ and consequently that

        +\begin{eqnarray*}
       -(\textbf{n} \otimes \mathrm
         v, 2\; \varepsilon(\textbf{u}))_{\Gamma_D}
       +
       (\textbf{n}\cdot\textbf{v}, p)_{\Gamma_D}
       = 0.
-    \end{eqnarray*} + \end{eqnarray*}" src="form_3279.png"/>

        In other words, as usual, strongly imposed boundary values do not appear in the weak form.

        It is noteworthy that if we impose Dirichlet boundary values on the entire boundary, then the pressure is only determined up to a constant. An algorithmic realization of that would use similar tools as have been seen in step-11.

      2. -

        Neumann-type or natural boundary conditions: On the rest of the boundary $\Gamma_N=\partial\Omega\backslash\Gamma_D$, let us re-write the boundary terms as follows:

        -\begin{eqnarray*}
+<p class=Neumann-type or natural boundary conditions: On the rest of the boundary $\Gamma_N=\partial\Omega\backslash\Gamma_D$, let us re-write the boundary terms as follows:

        +\begin{eqnarray*}
       -(\textbf{n} \otimes \mathrm
         v, 2\; \varepsilon(\textbf{u}))_{\Gamma_N}
       +
@@ -349,17 +349,17 @@
       &=&
       (\textbf{v},
        \textbf{n}\cdot [p \textbf{I} - 2\; \varepsilon(\textbf{u})])_{\Gamma_N}.
/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html	2024-03-17 21:57:44.583241340 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html	2024-03-17 21:57:44.587241365 +0000
@@ -132,8 +132,8 @@
  <a class=

        Introduction

        Note
        The material presented here is also discussed in video lecture 28. (All video lectures are also available here.)

        This is the first of a number of tutorial programs that will finally cover "real" time-dependent problems, not the slightly odd form of time dependence found in step-18 or the DAE model of step-21. In particular, this program introduces the wave equation in a bounded domain. Later, step-24 will consider an example of absorbing boundary conditions, and step-25 a kind of nonlinear wave equation producing solutions called solitons.

        -

        The wave equation in its prototypical form reads as follows: find $u(x,t), x\in\Omega, t\in[0,T]$ that satisfies

        -\begin{eqnarray*}
+<p>The wave equation in its prototypical form reads as follows: find <picture><source srcset=$u(x,t), x\in\Omega, t\in[0,T]$ that satisfies

        +\begin{eqnarray*}
         \frac{\partial^2 u}{\partial t^2}
         -
         \Delta u &=& f
@@ -151,10 +151,10 @@
         \frac{\partial u(x,0)}{\partial t} &=& u_1(x)
         \qquad
         \textrm{in}\ \Omega.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3358.png"/>

        Note that since this is an equation with second-order time derivatives, we need to pose two initial conditions, one for the value and one for the time derivative of the solution.

        -

        Physically, the equation describes the motion of an elastic medium. In 2-d, one can think of how a membrane moves if subjected to a force. The Dirichlet boundary conditions above indicate that the membrane is clamped at the boundary at a height $g(x,t)$ (this height might be moving as well — think of people holding a blanket and shaking it up and down). The first initial condition equals the initial deflection of the membrane, whereas the second one gives its velocity. For example, one could think of pushing the membrane down with a finger and then letting it go at $t=0$ (nonzero deflection but zero initial velocity), or hitting it with a hammer at $t=0$ (zero deflection but nonzero velocity). Both cases would induce motion in the membrane.

        +

        Physically, the equation describes the motion of an elastic medium. In 2-d, one can think of how a membrane moves if subjected to a force. The Dirichlet boundary conditions above indicate that the membrane is clamped at the boundary at a height $g(x,t)$ (this height might be moving as well — think of people holding a blanket and shaking it up and down). The first initial condition equals the initial deflection of the membrane, whereas the second one gives its velocity. For example, one could think of pushing the membrane down with a finger and then letting it go at $t=0$ (nonzero deflection but zero initial velocity), or hitting it with a hammer at $t=0$ (zero deflection but nonzero velocity). Both cases would induce motion in the membrane.

        Time discretization

        Method of lines or Rothe's method?

        There is a long-standing debate in the numerical analysis community over whether a discretization of time dependent equations should involve first discretizing the time variable leading to a stationary PDE at each time step that is then solved using standard finite element techniques (this is called the Rothe method), or whether one should first discretize the spatial variables, leading to a large system of ordinary differential equations that can then be handled by one of the usual ODE solvers (this is called the method of lines).

        @@ -167,12 +167,12 @@

        Rothe's method!

        Given these considerations, here is how we will proceed: let us first define a simple time stepping method for this second order problem, and then in a second step do the spatial discretization, i.e. we will follow Rothe's approach.

        For the first step, let us take a little detour first: in order to discretize a second time derivative, we can either discretize it directly, or we can introduce an additional variable and transform the system into a first order system. In many cases, this turns out to be equivalent, but dealing with first order systems is often simpler. To this end, let us introduce

        -\[
+<picture><source srcset=\[
         v = \frac{\partial u}{\partial t},
-\] +\]" src="form_3360.png"/>

        and call this variable the velocity for obvious reasons. We can then reformulate the original wave equation as follows:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
         \frac{\partial u}{\partial t}
         -
         v
@@ -197,37 +197,37 @@
         v(x,0) &=& u_1(x)
         \qquad
         \textrm{in}\ \Omega.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3361.png"/>

        -

        The advantage of this formulation is that it now only contains first time derivatives for both variables, for which it is simple to write down time stepping schemes. Note that we do not have boundary conditions for $v$ at first. However, we could enforce $v=\frac{\partial
-g}{\partial t}$ on the boundary. It turns out in numerical examples that this is actually necessary: without doing so the solution doesn't look particularly wrong, but the Crank-Nicolson scheme does not conserve energy if one doesn't enforce these boundary conditions.

        -

        With this formulation, let us introduce the following time discretization where a superscript $n$ indicates the number of a time step and $k=t_n-t_{n-1}$ is the length of the present time step:

        -\begin{eqnarray*}
+<p> The advantage of this formulation is that it now only contains first time derivatives for both variables, for which it is simple to write down time stepping schemes. Note that we do not have boundary conditions for <picture><source srcset=$v$ at first. However, we could enforce $v=\frac{\partial
+g}{\partial t}$ on the boundary. It turns out in numerical examples that this is actually necessary: without doing so the solution doesn't look particularly wrong, but the Crank-Nicolson scheme does not conserve energy if one doesn't enforce these boundary conditions.

        +

        With this formulation, let us introduce the following time discretization where a superscript $n$ indicates the number of a time step and $k=t_n-t_{n-1}$ is the length of the present time step:

        +\begin{eqnarray*}
   \frac{u^n - u^{n-1}}{k}
   - \left[\theta v^n + (1-\theta) v^{n-1}\right] &=& 0,
   \\
   \frac{v^n - v^{n-1}}{k}
   - \Delta\left[\theta u^n + (1-\theta) u^{n-1}\right]
   &=& \theta f^n + (1-\theta) f^{n-1}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3364.png"/>

        -

        Note how we introduced a parameter $\theta$ here. If we chose $\theta=0$, for example, the first equation would reduce to $\frac{u^n - u^{n-1}}{k}  - v^{n-1} = 0$, which is well-known as the forward or explicit Euler method. On the other hand, if we set $\theta=1$, then we would get $\frac{u^n - u^{n-1}}{k}  - v^n = 0$, which corresponds to the backward or implicit Euler method. Both these methods are first order accurate methods. They are simple to implement, but they are not really very accurate.

        -

        The third case would be to choose $\theta=\frac 12$. The first of the equations above would then read $\frac{u^n - u^{n-1}}{k}
-- \frac 12 \left[v^n + v^{n-1}\right] = 0$. This method is known as the Crank-Nicolson method and has the advantage that it is second order accurate. In addition, it has the nice property that it preserves the energy in the solution (physically, the energy is the sum of the kinetic energy of the particles in the membrane plus the potential energy present due to the fact that it is locally stretched; this quantity is a conserved one in the continuous equation, but most time stepping schemes do not conserve it after time discretization). Since $v^n$ also appears in the equation for $u^n$, the Crank-Nicolson scheme is also implicit.

        -

        In the program, we will leave $\theta$ as a parameter, so that it will be easy to play with it. The results section will show some numerical evidence comparing the different schemes.

        -

        The equations above (called the semidiscretized equations because we have only discretized the time, but not space), can be simplified a bit by eliminating $v^n$ from the first equation and rearranging terms. We then get

        -\begin{eqnarray*}
+<p> Note how we introduced a parameter <picture><source srcset=$\theta$ here. If we chose $\theta=0$, for example, the first equation would reduce to $\frac{u^n - u^{n-1}}{k}  - v^{n-1} = 0$, which is well-known as the forward or explicit Euler method. On the other hand, if we set $\theta=1$, then we would get $\frac{u^n - u^{n-1}}{k}  - v^n = 0$, which corresponds to the backward or implicit Euler method. Both these methods are first order accurate methods. They are simple to implement, but they are not really very accurate.

        +

        The third case would be to choose $\theta=\frac 12$. The first of the equations above would then read $\frac{u^n - u^{n-1}}{k}
+- \frac 12 \left[v^n + v^{n-1}\right] = 0$. This method is known as the Crank-Nicolson method and has the advantage that it is second order accurate. In addition, it has the nice property that it preserves the energy in the solution (physically, the energy is the sum of the kinetic energy of the particles in the membrane plus the potential energy present due to the fact that it is locally stretched; this quantity is a conserved one in the continuous equation, but most time stepping schemes do not conserve it after time discretization). Since $v^n$ also appears in the equation for $u^n$, the Crank-Nicolson scheme is also implicit.

        +

        In the program, we will leave $\theta$ as a parameter, so that it will be easy to play with it. The results section will show some numerical evidence comparing the different schemes.

        +

        The equations above (called the semidiscretized equations because we have only discretized the time, but not space), can be simplified a bit by eliminating $v^n$ from the first equation and rearranging terms. We then get

        +\begin{eqnarray*}
   \left[ 1-k^2\theta^2\Delta \right] u^n &=&
          \left[ 1+k^2\theta(1-\theta)\Delta\right] u^{n-1} + k v^{n-1}
          + k^2\theta\left[\theta f^n + (1-\theta) f^{n-1}\right],\\
    v^n &=& v^{n-1} + k\Delta\left[ \theta u^n + (1-\theta) u^{n-1}\right]
    + k\left[\theta f^n + (1-\theta) f^{n-1}\right].
-\end{eqnarray*} +\end{eqnarray*}" src="form_3372.png"/>

        -

        In this form, we see that if we are given the solution $u^{n-1},v^{n-1}$ of the previous timestep, that we can then solve for the variables $u^n,v^n$ separately, i.e. one at a time. This is convenient. In addition, we recognize that the operator in the first equation is positive definite, and the second equation looks particularly simple.

        +

        In this form, we see that if we are given the solution $u^{n-1},v^{n-1}$ of the previous timestep, that we can then solve for the variables $u^n,v^n$ separately, i.e. one at a time. This is convenient. In addition, we recognize that the operator in the first equation is positive definite, and the second equation looks particularly simple.

        Space discretization

        -

        We have now derived equations that relate the approximate (semi-discrete) solution $u^n(x)$ and its time derivative $v^n(x)$ at time $t_n$ with the solutions $u^{n-1}(x),v^{n-1}(x)$ of the previous time step at $t_{n-1}$. The next step is to also discretize the spatial variable using the usual finite element methodology. To this end, we multiply each equation with a test function, integrate over the entire domain, and integrate by parts where necessary. This leads to

        -\begin{eqnarray*}
+<p>We have now derived equations that relate the approximate (semi-discrete) solution <picture><source srcset=$u^n(x)$ and its time derivative $v^n(x)$ at time $t_n$ with the solutions $u^{n-1}(x),v^{n-1}(x)$ of the previous time step at $t_{n-1}$. The next step is to also discretize the spatial variable using the usual finite element methodology. To this end, we multiply each equation with a test function, integrate over the entire domain, and integrate by parts where necessary. This leads to

        +\begin{eqnarray*}
   (u^n,\varphi) + k^2\theta^2(\nabla u^n,\nabla \varphi) &=&
   (u^{n-1},\varphi) - k^2\theta(1-\theta)(\nabla u^{n-1},\nabla \varphi)
   +
@@ -247,15 +247,15 @@
   \left[
   \theta (f^n,\varphi) + (1-\theta) (f^{n-1},\varphi)
   \right].
-\end{eqnarray*} +\end{eqnarray*}" src="form_3378.png"/>

        -

        It is then customary to approximate $u^n(x) \approx u^n_h(x) = \sum_i
-U_i^n\phi_i^n(x)$, where $\phi_i^n(x)$ are the shape functions used for the discretization of the $n$-th time step and $U_i^n$ are the unknown nodal values of the solution. Similarly, $v^n(x) \approx
-v^n_h(x) = \sum_i V_i^n\phi_i^n(x)$. Finally, we have the solutions of the previous time step, $u^{n-1}(x) \approx u^{n-1}_h(x) = \sum_i
-U_i^{n-1}\phi_i^{n-1}(x)$ and $v^{n-1}(x) \approx v^{n-1}_h(x) = \sum_i
-V_i^{n-1}\phi_i^{n-1}(x)$. Note that since the solution of the previous time step has already been computed by the time we get to time step $n$, $U^{n-1},V^{n-1}$ are known. Furthermore, note that the solutions of the previous step may have been computed on a different mesh, so we have to use shape functions $\phi^{n-1}_i(x)$.

        +

        It is then customary to approximate $u^n(x) \approx u^n_h(x) = \sum_i
+U_i^n\phi_i^n(x)$, where $\phi_i^n(x)$ are the shape functions used for the discretization of the $n$-th time step and $U_i^n$ are the unknown nodal values of the solution. Similarly, $v^n(x) \approx
+v^n_h(x) = \sum_i V_i^n\phi_i^n(x)$. Finally, we have the solutions of the previous time step, $u^{n-1}(x) \approx u^{n-1}_h(x) = \sum_i
+U_i^{n-1}\phi_i^{n-1}(x)$ and $v^{n-1}(x) \approx v^{n-1}_h(x) = \sum_i
+V_i^{n-1}\phi_i^{n-1}(x)$. Note that since the solution of the previous time step has already been computed by the time we get to time step $n$, $U^{n-1},V^{n-1}$ are known. Furthermore, note that the solutions of the previous step may have been computed on a different mesh, so we have to use shape functions $\phi^{n-1}_i(x)$.

        If we plug these expansions into above equations and test with the test functions from the present mesh, we get the following linear system:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (M^n + k^2\theta^2 A^n)U^n &=&
   M^{n,n-1}U^{n-1} - k^2\theta(1-\theta) A^{n,n-1}U^{n-1}
   +
@@ -275,10 +275,10 @@
   \left[
   \theta F^n + (1-\theta) F^{n-1}
   \right],
-\end{eqnarray*} +\end{eqnarray*}" src="form_3387.png"/>

        where

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
         M^n_{ij} &=& (\phi_i^n, \phi_j^n),
         \\
         A^n_{ij} &=& (\nabla\phi_i^n, \nabla\phi_j^n),
@@ -290,14 +290,14 @@
         F^n_{i} &=& (f^n,\phi_i^n),
         \\
         F^{n-1}_{i} &=& (f^{n-1},\phi_i^n).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3388.png"/>

        If we solve these two equations, we can move the solution one step forward and go on to the next time step.

        -

        It is worth noting that if we choose the same mesh on each time step (as we will in fact do in the program below), then we have the same shape functions on time step $n$ and $n-1$, i.e. $\phi^n_i=\phi_i^{n-1}=\phi_i$. Consequently, we get $M^n=M^{n,n-1}=M$ and $A^n=A^{n,n-1}=A$. On the other hand, if we had used different shape functions, then we would have to compute integrals that contain shape functions defined on two meshes. This is a somewhat messy process that we omit here, but that is treated in some detail in step-28.

        +

        It is worth noting that if we choose the same mesh on each time step (as we will in fact do in the program below), then we have the same shape functions on time step $n$ and $n-1$, i.e. $\phi^n_i=\phi_i^{n-1}=\phi_i$. Consequently, we get $M^n=M^{n,n-1}=M$ and $A^n=A^{n,n-1}=A$. On the other hand, if we had used different shape functions, then we would have to compute integrals that contain shape functions defined on two meshes. This is a somewhat messy process that we omit here, but that is treated in some detail in step-28.

        Under these conditions (i.e. a mesh that doesn't change), one can optimize the solution procedure a bit by basically eliminating the solution of the second linear system. We will discuss this in the introduction of the step-25 program.

        Energy conservation

        -

        One way to compare the quality of a time stepping scheme is to see whether the numerical approximation preserves conservation properties of the continuous equation. For the wave equation, the natural quantity to look at is the energy. By multiplying the wave equation by $u_t$, integrating over $\Omega$, and integrating by parts where necessary, we find that

        -\[
+<p>One way to compare the quality of a time stepping scheme is to see whether the numerical approximation preserves conservation properties of the continuous equation. For the wave equation, the natural quantity to look at is the energy. By multiplying the wave equation by <picture><source srcset=$u_t$, integrating over $\Omega$, and integrating by parts where necessary, we find that

        +\[
         \frac{d}{d t}
         \left[\frac 12 \int_\Omega \left(\frac{\partial u}{\partial
         t}\right)^2 + (\nabla u)^2 \; dx\right]
@@ -306,34 +306,34 @@
         +
         \int_{\partial\Omega} n\cdot\nabla u
         \frac{\partial g}{\partial t} \; dx.
-\] +\]" src="form_3394.png"/>

        By consequence, in absence of body forces and constant boundary values, we get that

        -\[
+<picture><source srcset=\[
         E(t) = \frac 12 \int_\Omega \left(\frac{\partial u}{\partial
         t}\right)^2 + (\nabla u)^2 \; dx
-\] +\]" src="form_3395.png"/>

        -

        is a conserved quantity, i.e. one that doesn't change with time. We will compute this quantity after each time step. It is straightforward to see that if we replace $u$ by its finite element approximation, and $\frac{\partial u}{\partial t}$ by the finite element approximation of the velocity $v$, then

        -\[
+<p> is a conserved quantity, i.e. one that doesn't change with time. We will compute this quantity after each time step. It is straightforward to see that if we replace <picture><source srcset=$u$ by its finite element approximation, and $\frac{\partial u}{\partial t}$ by the finite element approximation of the velocity $v$, then

        +\[
         E(t_n) = \frac 12 \left<V^n, M^n V^n\right>
         +
         \frac 12 \left<U^n, A^n U^n\right>.
-\] +\]" src="form_3397.png"/>

        As we will see in the results section, the Crank-Nicolson scheme does indeed conserve the energy, whereas neither the forward nor the backward Euler scheme do.

        Who are Courant, Friedrichs, and Lewy?

        -

        One of the reasons why the wave equation is not easy to solve numerically is that explicit time discretizations are only stable if the time step is small enough. In particular, it is coupled to the spatial mesh width $h$. For the lowest order discretization we use here, the relationship reads

        -\[
+<p>One of the reasons why the wave equation is not easy to solve numerically is that explicit time discretizations are only stable if the time step is small enough. In particular, it is coupled to the spatial mesh width <picture><source srcset=$h$. For the lowest order discretization we use here, the relationship reads

        +\[
         k\le \frac hc
-\] +\]" src="form_3398.png"/>

        -

        where $c$ is the wave speed, which in our formulation of the wave equation has been normalized to one. Consequently, unless we use the implicit schemes with $\theta>0$, our solutions will not be numerically stable if we violate this restriction. Implicit schemes do not have this restriction for stability, but they become inaccurate if the time step is too large.

        +

        where $c$ is the wave speed, which in our formulation of the wave equation has been normalized to one. Consequently, unless we use the implicit schemes with $\theta>0$, our solutions will not be numerically stable if we violate this restriction. Implicit schemes do not have this restriction for stability, but they become inaccurate if the time step is too large.

        This condition was first recognized by Courant, Friedrichs, and Lewy — in 1928, long before computers became available for numerical computations! (This result appeared in the German language article R. Courant, K. Friedrichs and H. Lewy: Über die partiellen Differenzengleichungen der mathematischen Physik, Mathematische Annalen, vol. 100, no. 1, pages 32-74, 1928.) This condition on the time step is most frequently just referred to as the CFL condition. Intuitively, the CFL condition says that the time step must not be larger than the time it takes a wave to cross a single cell.

        -

        In the program, we will refine the square $[-1,1]^2$ seven times uniformly, giving a mesh size of $h=\frac 1{64}$, which is what we set the time step to. The fact that we set the time step and mesh size individually in two different places is error prone: it is too easy to refine the mesh once more but forget to also adjust the time step. step-24 shows a better way how to keep these things in sync.

        +

        In the program, we will refine the square $[-1,1]^2$ seven times uniformly, giving a mesh size of $h=\frac 1{64}$, which is what we set the time step to. The fact that we set the time step and mesh size individually in two different places is error prone: it is too easy to refine the mesh once more but forget to also adjust the time step. step-24 shows a better way how to keep these things in sync.

        The test case

        -

        Although the program has all the hooks to deal with nonzero initial and boundary conditions and body forces, we take a simple case where the domain is a square $[-1,1]^2$ and

        -\begin{eqnarray*}
+<p>Although the program has all the hooks to deal with nonzero initial and boundary conditions and body forces, we take a simple case where the domain is a square <picture><source srcset=$[-1,1]^2$ and

        +\begin{eqnarray*}
         f &=& 0,
         \\
         u_0 &=& 0,
@@ -347,7 +347,7 @@
/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html	2024-03-17 21:57:44.647241734 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html	2024-03-17 21:57:44.655241784 +0000
@@ -131,101 +131,101 @@
 <p><a class=

        The problem

        The temperature at a given location, neglecting thermal diffusion, can be stated as

        -\[
+<picture><source srcset=\[
 \rho C_p \frac{\partial}{\partial t}T(t,\mathbf r) = H(t,\mathbf r)
-\] +\]" src="form_3429.png"/>

        -

        Here $\rho (\mathbf r) $ is the density; $C_p (\mathbf r) $ is the specific heat; $\frac{\partial T}{\partial t}(t,\mathbf r)$ is the temperature rise due to the delivered microwave energy; and $H(t,\mathbf r)$ is the heating function defined as the thermal energy per time and volume transformed from deposited microwave energy.

        -

        Let us assume that tissues have heterogeneous dielectric properties but homogeneous acoustic properties. The basic acoustic generation equation in an acoustically homogeneous medium can be described as follows: if $u$ is the vector-valued displacement, then tissue certainly reacts to changes in pressure by acceleration:

        -\[
+<p>Here <picture><source srcset=$\rho (\mathbf r) $ is the density; $C_p (\mathbf r) $ is the specific heat; $\frac{\partial T}{\partial t}(t,\mathbf r)$ is the temperature rise due to the delivered microwave energy; and $H(t,\mathbf r)$ is the heating function defined as the thermal energy per time and volume transformed from deposited microwave energy.

        +

        Let us assume that tissues have heterogeneous dielectric properties but homogeneous acoustic properties. The basic acoustic generation equation in an acoustically homogeneous medium can be described as follows: if $u$ is the vector-valued displacement, then tissue certainly reacts to changes in pressure by acceleration:

        +\[
 \rho \frac{\partial^2}{\partial t^2}u(t,\mathbf r) =
 -\nabla p(t,\mathbf r).
-\] +\]" src="form_3434.png"/>

        Furthermore, it contracts due to excess pressure and expands based on changes in temperature:

        -\[
+<picture><source srcset=\[
 \nabla \cdot u(t,\mathbf r) = -\frac{p(t,\mathbf r)}{\rho c_0^2}+\beta T(t,\mathbf r) .
-\] +\]" src="form_3435.png"/>

        Here, $\beta$ is a thermoexpansion coefficient.

        -

        Let us now make the assumption that heating only happens on a time scale much shorter than wave propagation through tissue (i.e. the temporal length of the microwave pulse that heats the tissue is much shorter than the time it takes a wave to cross the domain). In that case, the heating rate $H(t,\mathbf r)$ can be written as $H(t,\mathbf r) = a(\mathbf
-r)\delta(t)$ (where $a(\mathbf r)$ is a map of absorption strengths for microwave energy and $\delta(t)$ is the Dirac delta function), which together with the first equation above will yield an instantaneous jump in the temperature $T(\mathbf r)$ at time $t=0$. Using this assumption, and taking all equations together, we can rewrite and combine the above as follows:

        -\[
+<p>Let us now make the assumption that heating only happens on a time scale much shorter than wave propagation through tissue (i.e. the temporal length of the microwave pulse that heats the tissue is much shorter than the time it takes a wave to cross the domain). In that case, the heating rate <picture><source srcset=$H(t,\mathbf r)$ can be written as $H(t,\mathbf r) = a(\mathbf
+r)\delta(t)$ (where $a(\mathbf r)$ is a map of absorption strengths for microwave energy and $\delta(t)$ is the Dirac delta function), which together with the first equation above will yield an instantaneous jump in the temperature $T(\mathbf r)$ at time $t=0$. Using this assumption, and taking all equations together, we can rewrite and combine the above as follows:

        +\[
 \Delta p-\frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2} = \lambda
 a(\mathbf r)\frac{d\delta(t)}{dt}
-\] +\]" src="form_3440.png"/>

        -

        where $\lambda = - \frac{\beta}{C_p}$.

        +

        where $\lambda = - \frac{\beta}{C_p}$.

        This somewhat strange equation with the derivative of a Dirac delta function on the right hand side can be rewritten as an initial value problem as follows:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \Delta \bar{p}- \frac{1}{c_0^2} \frac{\partial^2 \bar{p}}{\partial t^2} & = &
 0 \\
 \bar{p}(0,\mathbf r) &=& c_0^2 \lambda a(\mathbf r) = b(\mathbf r)  \\
 \frac{\partial\bar{p}(0,\mathbf r)}{\partial t} &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3442.png"/>

        (A derivation of this transformation into an initial value problem is given at the end of this introduction as an appendix.)

        -

        In the inverse problem, it is the initial condition $b(\mathbf r) = c_0^2 \lambda a(\mathbf r)$ that one would like to recover, since it is a map of absorption strengths for microwave energy, and therefore presumably an indicator to discern healthy from diseased tissue.

        +

        In the inverse problem, it is the initial condition $b(\mathbf r) = c_0^2 \lambda a(\mathbf r)$ that one would like to recover, since it is a map of absorption strengths for microwave energy, and therefore presumably an indicator to discern healthy from diseased tissue.

        In real application, the thermoacoustic source is very small as compared to the medium. The propagation path of the thermoacoustic waves can then be approximated as from the source to the infinity. Furthermore, detectors are only a limited distance from the source. One only needs to evaluate the values when the thermoacoustic waves pass through the detectors, although they do continue beyond. This is therefore a problem where we are only interested in a small part of an infinite medium, and we do not want waves generated somewhere to be reflected at the boundary of the domain which we consider interesting. Rather, we would like to simulate only that part of the wave field that is contained inside the domain of interest, and waves that hit the boundary of that domain to simply pass undisturbed through the boundary. In other words, we would like the boundary to absorb any waves that hit it.

        In general, this is a hard problem: Good absorbing boundary conditions are nonlinear and/or numerically very expensive. We therefore opt for a simple first order approximation to absorbing boundary conditions that reads

        -\[
+<picture><source srcset=\[
 \frac{\partial\bar{p}}{\partial\mathbf n} =
 -\frac{1}{c_0} \frac{\partial\bar{p}}{\partial t}
-\] +\]" src="form_3444.png"/>

        -

        Here, $\frac{\partial\bar{p}}{\partial\mathbf n}$ is the normal derivative at the boundary. It should be noted that this is not a particularly good boundary condition, but it is one of the very few that are reasonably simple to implement.

        +

        Here, $\frac{\partial\bar{p}}{\partial\mathbf n}$ is the normal derivative at the boundary. It should be noted that this is not a particularly good boundary condition, but it is one of the very few that are reasonably simple to implement.

        Weak form and discretization

        As in step-23, one first introduces a second variable, which is defined as the derivative of the pressure potential:

        -\[
+<picture><source srcset=\[
 v = \frac{\partial\bar{p}}{\partial t}
-\] +\]" src="form_3446.png"/>

        With the second variable, one then transforms the forward problem into two separate equations:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \bar{p}_{t} - v & = & 0 \\
 \Delta\bar{p} - \frac{1}{c_0^2}\,v_{t} & = & f
-\end{eqnarray*} +\end{eqnarray*}" src="form_3447.png"/>

        with initial conditions:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \bar{p}(0,\mathbf r) & = & b(r) \\
 v(0,\mathbf r)=\bar{p}_t(0,\mathbf r) & = & 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3448.png"/>

        -

        Note that we have introduced a right hand side $f(t,\mathbf r)$ here to show how to derive these formulas in the general case, although in the application to the thermoacoustic problem $f=0$.

        -

        The semi-discretized, weak version of this model, using the general $\theta$ scheme introduced in step-23 is then:

        -\begin{eqnarray*}
+<p> Note that we have introduced a right hand side <picture><source srcset=$f(t,\mathbf r)$ here to show how to derive these formulas in the general case, although in the application to the thermoacoustic problem $f=0$.

        +

        The semi-discretized, weak version of this model, using the general $\theta$ scheme introduced in step-23 is then:

        +\begin{eqnarray*}
 \left(\frac{\bar{p}^n-\bar{p}^{n-1}}{k},\phi\right)_\Omega-
 \left(\theta v^{n}+(1-\theta)v^{n-1},\phi\right)_\Omega & = & 0   \\
 -\left(\nabla((\theta\bar{p}^n+(1-\theta)\bar{p}^{n-1})),\nabla\phi\right)_\Omega-
 \frac{1}{c_0}\left(\frac{\bar{p}^n-\bar{p}^{n-1}}{k},\phi\right)_{\partial\Omega} -
 \frac{1}{c_0^2}\left(\frac{v^n-v^{n-1}}{k},\phi\right)_\Omega & =
 & \left(\theta f^{n}+(1-\theta)f^{n-1}, \phi\right)_\Omega,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3450.png"/>

        where $\phi$ is an arbitrary test function, and where we have used the absorbing boundary condition to integrate by parts: absorbing boundary conditions are incorporated into the weak form by using

        -\[
+<picture><source srcset=\[
 \int_\Omega\varphi \, \Delta p\; dx =
 -\int_\Omega\nabla \varphi \cdot \nabla p dx +
 \int_{\partial\Omega}\varphi \frac{\partial p}{\partial {\mathbf n}}ds.
-\] +\]" src="form_3451.png"/>

        From this we obtain the discrete model by introducing a finite number of shape functions, and get

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 M\bar{p}^{n}-k \theta M v^n & = & M\bar{p}^{n-1}+k (1-\theta)Mv^{n-1},\\
 
 (-c_0^2k \theta A-c_0 B)\bar{p}^n-Mv^{n} & = &
 (c_0^2k(1-\theta)A-c_0B)\bar{p}^{n-1}-Mv^{n-1}+c_0^2k(\theta F^{n}+(1-\theta)F^{n-1}).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3452.png"/>

        -

        The matrices $M$ and $A$ are here as in step-23, and the boundary mass matrix

        -\[
+<p> The matrices <picture><source srcset=$M$ and $A$ are here as in step-23, and the boundary mass matrix

        +\[
         B_{ij} = \left(\varphi_i,\varphi_j\right)_{\partial\Omega}
-\] +\]" src="form_3453.png"/>

        results from the use of absorbing boundary conditions.

        Above two equations can be rewritten in a matrix form with the pressure and its derivative as an unknown vector:

        -\[
+<picture><source srcset=\[
 \left(\begin{array}{cc}
  M         &       -k\theta M \\
 c_0^2\,k\,\theta\,A+c_0\,B  &  M   \\
@@ -238,10 +238,10 @@
  G_1  \\
  G_2 -(\theta F^{n}+(1-\theta)F ^{n-1})c_{0}^{2}k \\
                 \end{array}\right)
-\] +\]" src="form_3454.png"/>

        where

        -\[
+<picture><source srcset=\[
 \left(\begin{array}{c}
 G_1 \\
 G_2 \\
@@ -250,115 +250,115 @@
  M\bar{p}^{n-1}+k(1-\theta)Mv^{n-1}\\
  (-c_{0}^{2}k (1-\theta)A+c_0 B)\bar{p}^{n-1} +Mv^{n-1}
                 \end{array}\right)
-\] +\]" src="form_3455.png"/>

        By simple transformations, one then obtains two equations for the pressure potential and its derivative, just as in the previous tutorial program:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 (M+(k\,\theta\,c_{0})^{2}A+c_0k\theta B)\bar{p}^{n} & = &
 G_{1}+(k\, \theta)G_{2}-(c_0k)^2\theta (\theta F^{n}+(1-\theta)F^{n-1}) \\
 Mv^n & = & -(c_0^2\,k\, \theta\, A+c_0B)\bar{p}^{n}+ G_2 -
 c_0^2k(\theta F^{n}+(1-\theta)F^{n-1})
-\end{eqnarray*} +\end{eqnarray*}" src="form_3456.png"/>

        What the program does

        Compared to step-23, this programs adds the treatment of a simple absorbing boundary conditions. In addition, it deals with data obtained from actual experimental measurements. To this end, we need to evaluate the solution at points at which the experiment also evaluates a real pressure field. We will see how to do that using the VectorTools::point_value function further down below.

        Appendix: PDEs with Dirac delta functions as right hand side and their transformation to an initial value problem

        In the derivation of the initial value problem for the wave equation, we initially found that the equation had the derivative of a Dirac delta function as a right hand side:

        -\[
+<picture><source srcset=\[
 \Delta p-\frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2} = \lambda
 a(\mathbf r)\frac{d\delta(t)}{dt}.
-\] +\]" src="form_3457.png"/>

        -

        In order to see how to transform this single equation into the usual statement of a PDE with initial conditions, let us make the assumption that the physically quite reasonable medium is at rest initially, i.e. $p(t,\mathbf
-r)=\frac{\partial p(t,\mathbf r)}{\partial t}=0$ for $t<0$. Next, let us form the indefinite integral with respect to time of both sides:

        -\[
+<p> In order to see how to transform this single equation into the usual statement of a PDE with initial conditions, let us make the assumption that the physically quite reasonable medium is at rest initially, i.e. <picture><source srcset=$p(t,\mathbf
+r)=\frac{\partial p(t,\mathbf r)}{\partial t}=0$ for $t<0$. Next, let us form the indefinite integral with respect to time of both sides:

        +\[
 \int^t \Delta p\; dt -\int^t \frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2}
 \; dt
 =
 \int^t \lambda a(\mathbf r)\frac{d\delta(t)}{dt} \;dt.
-\] +\]" src="form_3460.png"/>

        This immediately leads to the statement

        -\[
+<picture><source srcset=\[
 P(t,\mathbf r) - \frac{1}{c_0^2} \frac{\partial p}{\partial t}
 =
/usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html	2024-03-17 21:57:44.711242130 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html	2024-03-17 21:57:44.715242155 +0000
@@ -138,57 +138,57 @@
 <dl class=

        Note
        We will cover a separate nonlinear equation from quantum mechanics, the Nonlinear Schrödinger Equation, in step-58.

      Statement of the problem

      The sine-Gordon initial-boundary-value problem (IBVP) we wish to solve consists of the following equations:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   u_{tt}-\Delta u &=& -\sin(u) \quad\mbox{for}\quad (x,t) \in \Omega \times (t_0,t_f],\\
   {\mathbf n} \cdot \nabla u &=& 0 \quad\mbox{for}\quad (x,t) \in \partial\Omega
            \times (t_0,t_f],\\
   u(x,t_0) &=& u_0(x).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3493.png"/>

      -

      It is a nonlinear equation similar to the wave equation we discussed in step-23 and step-24. We have chosen to enforce zero Neumann boundary conditions in order for waves to reflect off the boundaries of our domain. It should be noted, however, that Dirichlet boundary conditions are not appropriate for this problem. Even though the solutions to the sine-Gordon equation are localized, it only makes sense to specify (Dirichlet) boundary conditions at $x=\pm\infty$, otherwise either a solution does not exist or only the trivial solution $u=0$ exists.

      -

      However, the form of the equation above is not ideal for numerical discretization. If we were to discretize the second-order time derivative directly and accurately, then we would need a large stencil (i.e., several time steps would need to be kept in the memory), which could become expensive. Therefore, in complete analogy to what we did in step-23 and step-24, we split the second-order (in time) sine-Gordon equation into a system of two first-order (in time) equations, which we call the split, or velocity, formulation. To this end, by setting $v = u_t$, it is easy to see that the sine-Gordon equation is equivalent to

      -\begin{eqnarray*}
+<p> It is a nonlinear equation similar to the wave equation we discussed in <a class=step-23 and step-24. We have chosen to enforce zero Neumann boundary conditions in order for waves to reflect off the boundaries of our domain. It should be noted, however, that Dirichlet boundary conditions are not appropriate for this problem. Even though the solutions to the sine-Gordon equation are localized, it only makes sense to specify (Dirichlet) boundary conditions at $x=\pm\infty$, otherwise either a solution does not exist or only the trivial solution $u=0$ exists.

      +

      However, the form of the equation above is not ideal for numerical discretization. If we were to discretize the second-order time derivative directly and accurately, then we would need a large stencil (i.e., several time steps would need to be kept in the memory), which could become expensive. Therefore, in complete analogy to what we did in step-23 and step-24, we split the second-order (in time) sine-Gordon equation into a system of two first-order (in time) equations, which we call the split, or velocity, formulation. To this end, by setting $v = u_t$, it is easy to see that the sine-Gordon equation is equivalent to

      +\begin{eqnarray*}
   u_t - v &=& 0,\\
   v_t - \Delta u &=& -\sin(u).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3497.png"/>

      Discretization of the equations in time

      -

      Now, we can discretize the split formulation in time using the $\theta$-method, which has a stencil of only two time steps. By choosing a $\theta\in [0,1]$, the latter discretization allows us to choose from a continuum of schemes. In particular, if we pick $\theta=0$ or $\theta=1$, we obtain the first-order accurate explicit or implicit Euler method, respectively. Another important choice is $\theta=\frac{1}{2}$, which gives the second-order accurate Crank-Nicolson scheme. Henceforth, a superscript $n$ denotes the values of the variables at the $n^{\mathrm{th}}$ time step, i.e. at $t=t_n \dealcoloneq n k$, where $k$ is the (fixed) time step size. Thus, the split formulation of the time-discretized sine-Gordon equation becomes

      -\begin{eqnarray*}
+<p>Now, we can discretize the split formulation in time using the <picture><source srcset=$\theta$-method, which has a stencil of only two time steps. By choosing a $\theta\in [0,1]$, the latter discretization allows us to choose from a continuum of schemes. In particular, if we pick $\theta=0$ or $\theta=1$, we obtain the first-order accurate explicit or implicit Euler method, respectively. Another important choice is $\theta=\frac{1}{2}$, which gives the second-order accurate Crank-Nicolson scheme. Henceforth, a superscript $n$ denotes the values of the variables at the $n^{\mathrm{th}}$ time step, i.e. at $t=t_n \dealcoloneq n k$, where $k$ is the (fixed) time step size. Thus, the split formulation of the time-discretized sine-Gordon equation becomes

      +\begin{eqnarray*}
   \frac{u^n - u^{n-1}}{k} - \left[\theta v^n + (1-\theta) v^{n-1}\right] &=& 0,\\
   \frac{v^n - v^{n-1}}{k} - \Delta\left[\theta u^n + (1-\theta) u^{n-1}\right]
   &=& -\sin\left[\theta u^n + (1-\theta) u^{n-1}\right].
-\end{eqnarray*} +\end{eqnarray*}" src="form_3502.png"/>

      -

      We can simplify the latter via a bit of algebra. Eliminating $v^n$ from the first equation and rearranging, we obtain

      -\begin{eqnarray*}
+<p>We can simplify the latter via a bit of algebra. Eliminating <picture><source srcset=$v^n$ from the first equation and rearranging, we obtain

      +\begin{eqnarray*}
   \left[ 1-k^2\theta^2\Delta \right] u^n &=&
          \left[ 1+k^2\theta(1-\theta)\Delta\right] u^{n-1} + k v^{n-1}
          - k^2\theta\sin\left[\theta u^n + (1-\theta) u^{n-1}\right],\\
    v^n &=& v^{n-1} + k\Delta\left[ \theta u^n + (1-\theta) u^{n-1}\right]
          - k\sin\left[ \theta u^n + (1-\theta) u^{n-1} \right].
-\end{eqnarray*} +\end{eqnarray*}" src="form_3503.png"/>

      -

      It may seem as though we can just proceed to discretize the equations in space at this point. While this is true for the second equation (which is linear in $v^n$), this would not work for all $\theta$ since the first equation above is nonlinear. Therefore, a nonlinear solver must be implemented, then the equations can be discretized in space and solved.

      -

      To this end, we can use Newton's method. Given the nonlinear equation $F(u^n) = 0$, we produce successive approximations to $u^n$ as follows:

      -\begin{eqnarray*}
+<p>It may seem as though we can just proceed to discretize the equations in space at this point. While this is true for the second equation (which is linear in <picture><source srcset=$v^n$), this would not work for all $\theta$ since the first equation above is nonlinear. Therefore, a nonlinear solver must be implemented, then the equations can be discretized in space and solved.

      +

      To this end, we can use Newton's method. Given the nonlinear equation $F(u^n) = 0$, we produce successive approximations to $u^n$ as follows:

      +\begin{eqnarray*}
   \mbox{ Find } \delta u^n_l \mbox{ s.t. } F'(u^n_l)\delta u^n_l = -F(u^n_l)
   \mbox{, set }  u^n_{l+1} = u^n_l + \delta u^n_l.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3505.png"/>

      -

      The iteration can be initialized with the old time step, i.e. $u^n_0 = u^{n-1}$, and eventually it will produce a solution to the first equation of the split formulation (see above). For the time discretization of the sine-Gordon equation under consideration here, we have that

      -\begin{eqnarray*}
+<p> The iteration can be initialized with the old time step, i.e. <picture><source srcset=$u^n_0 = u^{n-1}$, and eventually it will produce a solution to the first equation of the split formulation (see above). For the time discretization of the sine-Gordon equation under consideration here, we have that

      +\begin{eqnarray*}
   F(u^n_l) &=&  \left[ 1-k^2\theta^2\Delta \right] u^n_l -
                  \left[ 1+k^2\theta(1-\theta)\Delta\right] u^{n-1} - k v^{n-1}
                  + k^2\theta\sin\left[\theta u^n_l + (1-\theta) u^{n-1}\right],\\
   F'(u^n_l) &=& 1-k^2\theta^2\Delta + k^2\theta^2\cos\left[\theta u^n_l
                         + (1-\theta) u^{n-1}\right].
-\end{eqnarray*} +\end{eqnarray*}" src="form_3507.png"/>

      -

      Notice that while $F(u^n_l)$ is a function, $F'(u^n_l)$ is an operator.

      +

      Notice that while $F(u^n_l)$ is a function, $F'(u^n_l)$ is an operator.

      Weak formulation of the time-discretized equations

      -

      With hindsight, we choose both the solution and the test space to be $H^1(\Omega)$. Hence, multiplying by a test function $\varphi$ and integrating, we obtain the following variational (or weak) formulation of the split formulation (including the nonlinear solver for the first equation) at each time step:

      -\begin{eqnarray*}
+<p>With hindsight, we choose both the solution and the test space to be <picture><source srcset=$H^1(\Omega)$. Hence, multiplying by a test function $\varphi$ and integrating, we obtain the following variational (or weak) formulation of the split formulation (including the nonlinear solver for the first equation) at each time step:

      +\begin{eqnarray*}
   &\mbox{ Find}& \delta u^n_l \in H^1(\Omega) \mbox{ s.t. }
   \left( F'(u^n_l)\delta u^n_l, \varphi \right)_{\Omega}
   = -\left(F(u^n_l), \varphi \right)_{\Omega} \;\forall\varphi\in H^1(\Omega),
@@ -199,91 +199,91 @@
          - k (1-\theta)\left( \nabla u^{n-1}, \nabla\varphi \right)_{\Omega}
          - k\left(\sin\left[ \theta u^n + (1-\theta) u^{n-1} \right],
          \varphi \right)_{\Omega} \;\forall\varphi\in H^1(\Omega).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3511.png"/>

      -

      Note that the we have used integration by parts and the zero Neumann boundary conditions on all terms involving the Laplacian operator. Moreover, $F(\cdot)$ and $F'(\cdot)$ are as defined above, and $(\cdot,\cdot)_{\Omega}$ denotes the usual $L^2$ inner product over the domain $\Omega$, i.e. $(f,g)_{\Omega} = \int_\Omega fg
-\,\mathrm{d}x$. Finally, notice that the first equation is, in fact, the definition of an iterative procedure, so it is solved multiple times during each time step until a stopping criterion is met.

      +

      Note that the we have used integration by parts and the zero Neumann boundary conditions on all terms involving the Laplacian operator. Moreover, $F(\cdot)$ and $F'(\cdot)$ are as defined above, and $(\cdot,\cdot)_{\Omega}$ denotes the usual $L^2$ inner product over the domain $\Omega$, i.e. $(f,g)_{\Omega} = \int_\Omega fg
+\,\mathrm{d}x$. Finally, notice that the first equation is, in fact, the definition of an iterative procedure, so it is solved multiple times during each time step until a stopping criterion is met.

      Discretization of the weak formulation in space

      -

      Using the Finite Element Method, we discretize the variational formulation in space. To this end, let $V_h$ be a finite-dimensional $H^1(\Omega)$-conforming finite element space ( $\mathrm{dim}\, V_h = N
-< \infty$) with nodal basis $\{\varphi_1,\ldots,\varphi_N\}$. Now, we can expand all functions in the weak formulation (see above) in terms of the nodal basis. Henceforth, we shall denote by a capital letter the vector of coefficients (in the nodal basis) of a function denoted by the same letter in lower case; e.g., $u^n = \sum_{i=1}^N
-U^n_i \varphi_i$ where $U^n \in {R}^N$ and $u^n \in
-H^1(\Omega)$. Thus, the finite-dimensional version of the variational formulation requires that we solve the following matrix equations at each time step:

      -\begin{eqnarray*}
+<p>Using the Finite Element Method, we discretize the variational formulation in space. To this end, let <picture><source srcset=$V_h$ be a finite-dimensional $H^1(\Omega)$-conforming finite element space ( $\mathrm{dim}\, V_h = N
+< \infty$) with nodal basis $\{\varphi_1,\ldots,\varphi_N\}$. Now, we can expand all functions in the weak formulation (see above) in terms of the nodal basis. Henceforth, we shall denote by a capital letter the vector of coefficients (in the nodal basis) of a function denoted by the same letter in lower case; e.g., $u^n = \sum_{i=1}^N
+U^n_i \varphi_i$ where $U^n \in {R}^N$ and $u^n \in
+H^1(\Omega)$. Thus, the finite-dimensional version of the variational formulation requires that we solve the following matrix equations at each time step:

      +\begin{eqnarray*}
   F_h'(U^{n,l})\delta U^{n,l} &=& -F_h(U^{n,l}), \qquad
         U^{n,l+1} = U^{n,l} + \delta U^{n,l}, \qquad U^{n,0} = U^{n-1}; \\
   MV^n &=& MV^{n-1} - k \theta AU^n -k (1-\theta) AU^{n-1} - k S(u^n,u^{n-1}).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3521.png"/>

      -

      Above, the matrix $F_h'(\cdot)$ and the vector $F_h(\cdot)$ denote the discrete versions of the gadgets discussed above, i.e.,

      -\begin{eqnarray*}
+<p> Above, the matrix <picture><source srcset=$F_h'(\cdot)$ and the vector $F_h(\cdot)$ denote the discrete versions of the gadgets discussed above, i.e.,

      +\begin{eqnarray*}
   F_h(U^{n,l}) &=&  \left[ M+k^2\theta^2A \right] U^{n,l} -
                 \left[ M-k^2\theta(1-\theta)A \right] U^{n-1} - k MV^{n-1}
                 + k^2\theta S(u^n_l, u^{n-1}),\\
   F_h'(U^{n,l}) &=& M+k^2\theta^2A
                                 + k^2\theta^2N(u^n_l,u^{n-1})
-\end{eqnarray*} +\end{eqnarray*}" src="form_3524.png"/>

      -

      Again, note that the first matrix equation above is, in fact, the definition of an iterative procedure, so it is solved multiple times until a stopping criterion is met. Moreover, $M$ is the mass matrix, i.e. $M_{ij} = \left( \varphi_i,\varphi_j \right)_{\Omega}$, $A$ is the Laplace matrix, i.e. $A_{ij} = \left( \nabla \varphi_i, \nabla
-\varphi_j \right)_{\Omega}$, $S$ is the nonlinear term in the equation that defines our auxiliary velocity variable, i.e. $S_j(f,g) = \left(
-  \sin\left[ \theta f + (1-\theta) g\right], \varphi_j \right)_{\Omega}$, and $N$ is the nonlinear term in the Jacobian matrix of $F(\cdot)$, i.e. $N_{ij}(f,g) = \left( \cos\left[ \theta f + (1-\theta) g\right]\varphi_i,
-  \varphi_j \right)_{\Omega}$.

      +

      Again, note that the first matrix equation above is, in fact, the definition of an iterative procedure, so it is solved multiple times until a stopping criterion is met. Moreover, $M$ is the mass matrix, i.e. $M_{ij} = \left( \varphi_i,\varphi_j \right)_{\Omega}$, $A$ is the Laplace matrix, i.e. $A_{ij} = \left( \nabla \varphi_i, \nabla
+\varphi_j \right)_{\Omega}$, $S$ is the nonlinear term in the equation that defines our auxiliary velocity variable, i.e. $S_j(f,g) = \left(
+  \sin\left[ \theta f + (1-\theta) g\right], \varphi_j \right)_{\Omega}$, and $N$ is the nonlinear term in the Jacobian matrix of $F(\cdot)$, i.e. $N_{ij}(f,g) = \left( \cos\left[ \theta f + (1-\theta) g\right]\varphi_i,
+  \varphi_j \right)_{\Omega}$.

      What solvers can we use for the first equation? Let's look at the matrix we have to invert:

      -\[
+<picture><source srcset=\[
   (M+k^2\theta^2(A + N))_{ij} =
   \int_\Omega (1+k^2\theta^2 \cos \alpha)
   \varphi_i\varphi_j \; dx
   + k^2 \theta^2 \int_\Omega \nabla\varphi_i\nabla\varphi_j \; dx,
-\] +\]" src="form_3529.png"/>

      -

      for some $\alpha$ that depends on the present and previous solution. First, note that the matrix is symmetric. In addition, if the time step $k$ is small enough, i.e. if $k\theta<1$, then the matrix is also going to be positive definite. In the program below, this will always be the case, so we will use the Conjugate Gradient method together with the SSOR method as preconditioner. We should keep in mind, however, that this will fail if we happen to use a bigger time step. Fortunately, in that case the solver will just throw an exception indicating a failure to converge, rather than silently producing a wrong result. If that happens, then we can simply replace the CG method by something that can handle indefinite symmetric systems. The GMRES solver is typically the standard method for all "bad" linear systems, but it is also a slow one. Possibly better would be a solver that utilizes the symmetry, such as, for example, SymmLQ, which is also implemented in deal.II.

      -

      This program uses a clever optimization over step-23 and step-24: If you read the above formulas closely, it becomes clear that the velocity $V$ only ever appears in products with the mass matrix. In step-23 and step-24, we were, therefore, a bit wasteful: in each time step, we would solve a linear system with the mass matrix, only to multiply the solution of that system by $M$ again in the next time step. This can, of course, be avoided, and we do so in this program.

      +

      for some $\alpha$ that depends on the present and previous solution. First, note that the matrix is symmetric. In addition, if the time step $k$ is small enough, i.e. if $k\theta<1$, then the matrix is also going to be positive definite. In the program below, this will always be the case, so we will use the Conjugate Gradient method together with the SSOR method as preconditioner. We should keep in mind, however, that this will fail if we happen to use a bigger time step. Fortunately, in that case the solver will just throw an exception indicating a failure to converge, rather than silently producing a wrong result. If that happens, then we can simply replace the CG method by something that can handle indefinite symmetric systems. The GMRES solver is typically the standard method for all "bad" linear systems, but it is also a slow one. Possibly better would be a solver that utilizes the symmetry, such as, for example, SymmLQ, which is also implemented in deal.II.

      +

      This program uses a clever optimization over step-23 and step-24: If you read the above formulas closely, it becomes clear that the velocity $V$ only ever appears in products with the mass matrix. In step-23 and step-24, we were, therefore, a bit wasteful: in each time step, we would solve a linear system with the mass matrix, only to multiply the solution of that system by $M$ again in the next time step. This can, of course, be avoided, and we do so in this program.

      The test case

      There are a few analytical solutions for the sine-Gordon equation, both in 1D and 2D. In particular, the program as is computes the solution to a problem with a single kink-like solitary wave initial condition. This solution is given by Leibbrandt in Phys. Rev. Lett. 41(7), and is implemented in the ExactSolution class.

      It should be noted that this closed-form solution, strictly speaking, only holds for the infinite-space initial-value problem (not the Neumann initial-boundary-value problem under consideration here). However, given that we impose zero Neumann boundary conditions, we expect that the solution to our initial-boundary-value problem would be close to the solution of the infinite-space initial-value problem, if reflections of waves off the boundaries of our domain do not occur. In practice, this is of course not the case, but we can at least assume that this were so.

      -

      The constants $\vartheta$ and $\lambda$ in the 2D solution and $\vartheta$, $\phi$ and $\tau$ in the 3D solution are called the Bäcklund transformation parameters. They control such things as the orientation and steepness of the kink. For the purposes of testing the code against the exact solution, one should choose the parameters so that the kink is aligned with the grid.

      +

      The constants $\vartheta$ and $\lambda$ in the 2D solution and $\vartheta$, $\phi$ and $\tau$ in the 3D solution are called the Bäcklund transformation parameters. They control such things as the orientation and steepness of the kink. For the purposes of testing the code against the exact solution, one should choose the parameters so that the kink is aligned with the grid.

      The solutions that we implement in the ExactSolution class are these:

      • In 1D:

        -\[
+<picture><source srcset=\[
   u(x,t) =
   -4 \arctan\left[
      \frac{m}{\sqrt{1-m^2}}
      \frac{\sin\left(\sqrt{1-m^2}t+c_2\right)}
      {\cosh\left(mx+c_1\right)}
      \right],
-  \] + \]" src="form_3532.png"/>

        -

        where we choose $m=\frac 12, c_1=c_2=0$.

        +

        where we choose $m=\frac 12, c_1=c_2=0$.

        In 1D, more interesting analytical solutions are known. Many of them are listed on http://mathworld.wolfram.com/Sine-GordonEquation.html .

      • In 2D:

        -\[
+<picture><source srcset=\[
     u(x,y,t) = 4 \arctan \left[a_0 e^{s\xi}\right],
-  \] + \]" src="form_3534.png"/>

        where $\xi$ is defined as

        -\[
+<picture><source srcset=\[
     \xi = x \cos\vartheta + \sin(\vartheta) (y\cosh\lambda + t\sinh \lambda),
-  \] + \]" src="form_3535.png"/>

        -

        and where we choose $\vartheta=\frac \pi 4, \lambda=a_0=s=1$.

        +

        and where we choose $\vartheta=\frac \pi 4, \lambda=a_0=s=1$.

      • In 3D:

        -\[
+<picture><source srcset=\[
     u(x,y,z,t) = 4 \arctan \left[c_0 e^{s\xi}\right],
-  \] + \]" src="form_3537.png"/>

        where $\xi$ is defined as

        -\[
+<picture><source srcset=\[
     \xi = x \cos\vartheta + y \sin \vartheta \cos\phi +
           \sin \vartheta \sin\phi (z\cosh\tau + t\sinh \tau),
/usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html	2024-03-17 21:57:44.767242476 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html	2024-03-17 21:57:44.775242525 +0000
@@ -166,8 +166,8 @@
   \right].
 \end{align*}

        -

        Here, $k_n=t_n-t_{n-1}$ is the time step size. The theta-scheme generalizes the explicit Euler ( $\theta=0$), implicit Euler ( $\theta=1$) and Crank-Nicolson ( $\theta=\frac 12$) time discretizations. Since the latter has the highest convergence order, we will choose $\theta=\frac 12$ in the program below, but make it so that playing with this parameter remains simple. (If you are interested in playing with higher order methods, take a look at step-52.)

        -

        Given this time discretization, space discretization happens as it always does, by multiplying with test functions, integrating by parts, and then restricting everything to a finite dimensional subspace. This yields the following set of fully discrete equations after multiplying through with $k_n$:

        +

        Here, $k_n=t_n-t_{n-1}$ is the time step size. The theta-scheme generalizes the explicit Euler ( $\theta=0$), implicit Euler ( $\theta=1$) and Crank-Nicolson ( $\theta=\frac 12$) time discretizations. Since the latter has the highest convergence order, we will choose $\theta=\frac 12$ in the program below, but make it so that playing with this parameter remains simple. (If you are interested in playing with higher order methods, take a look at step-52.)

        +

        Given this time discretization, space discretization happens as it always does, by multiplying with test functions, integrating by parts, and then restricting everything to a finite dimensional subspace. This yields the following set of fully discrete equations after multiplying through with $k_n$:

        \begin{align*}
   M U^n-MU^{n-1}
   +
@@ -185,7 +185,7 @@
   \right],
 \end{align*}

        -

        where $M$ is the mass matrix and $A$ is the stiffness matrix that results from discretizing the Laplacian. Bringing all known quantities to the right hand side yields the linear system we have to solve in every step:

        +

        where $M$ is the mass matrix and $A$ is the stiffness matrix that results from discretizing the Laplacian. Bringing all known quantities to the right hand side yields the linear system we have to solve in every step:

        \begin{align*}
   (M
   +
@@ -211,7 +211,7 @@
 <ul>
 <li>
 <p class=Time step size and minimal mesh size: For stationary problems, the general approach is "make the mesh as fine as it is necessary". For problems with singularities, this often leads to situations where we get many levels of refinement into corners or along interfaces. The very first tutorial to use adaptive meshes, step-6, is a point in case already.

        -

        However, for time dependent problems, we typically need to choose the time step related to the mesh size. For explicit time discretizations, this is obvious, since we need to respect a CFL condition that ties the time step size to the smallest mesh size. For implicit time discretizations, no such hard restriction exists, but in practice we still want to make the time step smaller if we make the mesh size smaller since we typically have error estimates of the form $\|e\| \le {\cal O}(k^p + h^q)$ where $p,q$ are the convergence orders of the time and space discretization, respectively. We can only make the error small if we decrease both terms. Ideally, an estimate like this would suggest to choose $k \propto h^{q/p}$. Because, at least for problems with non-smooth solutions, the error is typically localized in the cells with the smallest mesh size, we have to indeed choose $k \propto h_{\text{min}}^{q/p}$, using the smallest mesh size.

        +

        However, for time dependent problems, we typically need to choose the time step related to the mesh size. For explicit time discretizations, this is obvious, since we need to respect a CFL condition that ties the time step size to the smallest mesh size. For implicit time discretizations, no such hard restriction exists, but in practice we still want to make the time step smaller if we make the mesh size smaller since we typically have error estimates of the form $\|e\| \le {\cal O}(k^p + h^q)$ where $p,q$ are the convergence orders of the time and space discretization, respectively. We can only make the error small if we decrease both terms. Ideally, an estimate like this would suggest to choose $k \propto h^{q/p}$. Because, at least for problems with non-smooth solutions, the error is typically localized in the cells with the smallest mesh size, we have to indeed choose $k \propto h_{\text{min}}^{q/p}$, using the smallest mesh size.

        The consequence is that refining the mesh further in one place implies not only the moderate additional effort of increasing the number of degrees of freedom slightly, but also the much larger effort of having the solve the global linear system more often because of the smaller time step.

        In practice, one typically deals with this by acknowledging that we can not make the time step arbitrarily small, and consequently can not make the local mesh size arbitrarily small. Rather, we set a maximal level of refinement and when we flag cells for refinement, we simply do not refine those cells whose children would exceed this maximal level of refinement.

        There is a similar problem in that we will choose a right hand side that will switch on in different parts of the domain at different times. To avoid being caught flat footed with too coarse a mesh in areas where we suddenly need a finer mesh, we will also enforce in our program a minimal mesh refinement level.

        @@ -242,7 +242,7 @@ \sum_j U^n \varphi_j(\mathbf x), \end{align*}" src="form_3618.png"/>

        -

        multiply with test functions $\varphi_i(\mathbf x)$ and integrate by parts where necessary. In a process as outlined above, this would yield

        +

        multiply with test functions $\varphi_i(\mathbf x)$ and integrate by parts where necessary. In a process as outlined above, this would yield

        \begin{align*}
     \sum_j
     (M
@@ -262,7 +262,7 @@
     \right].
   \end{align*}

        -

        Now imagine that we have changed the mesh between time steps $n-1$ and $n$. Then the problem is that the basis functions we use for $u_h^n$ and $u^{n-1}$ are different! This pertains to the terms on the right hand side, the first of which we could more clearly write as (the second follows the same pattern)

        +

        Now imagine that we have changed the mesh between time steps $n-1$ and $n$. Then the problem is that the basis functions we use for $u_h^n$ and $u^{n-1}$ are different! This pertains to the terms on the right hand side, the first of which we could more clearly write as (the second follows the same pattern)

        \begin{align*}
     (\varphi_i, u_h^{n-1})
     =
@@ -274,7 +274,7 @@
     i=1\ldots N_n.
   \end{align*}

        -

        If the meshes used in these two time steps are the same, then $(\varphi_i^n, \varphi_j^{n-1})$ forms a square mass matrix $M_{ij}$. However, if the meshes are not the same, then in general the matrix is rectangular. Worse, it is difficult to even compute these integrals because if we loop over the cells of the mesh at time step $n$, then we need to evaluate $\varphi_j^{n-1}$ at the quadrature points of these cells, but they do not necessarily correspond to the cells of the mesh at time step $n-1$ and $\varphi_j^{n-1}$ is not defined via these cells; the same of course applies if we wanted to compute the integrals via integration on the cells of mesh $n-1$.

        +

        If the meshes used in these two time steps are the same, then $(\varphi_i^n, \varphi_j^{n-1})$ forms a square mass matrix $M_{ij}$. However, if the meshes are not the same, then in general the matrix is rectangular. Worse, it is difficult to even compute these integrals because if we loop over the cells of the mesh at time step $n$, then we need to evaluate $\varphi_j^{n-1}$ at the quadrature points of these cells, but they do not necessarily correspond to the cells of the mesh at time step $n-1$ and $\varphi_j^{n-1}$ is not defined via these cells; the same of course applies if we wanted to compute the integrals via integration on the cells of mesh $n-1$.

        In any case, what we have to face is a situation where we need to integrate shape functions defined on two different meshes. This can be done, and is in fact demonstrated in step-28, but the process is at best described by the word "awkward".

        In practice, one does not typically want to do this. Rather, we avoid the whole situation by interpolating the solution from the old to the new mesh every time we adapt the mesh. In other words, rather than solving the equations above, we instead solve the problem

        \begin{align*}
@@ -296,14 +296,14 @@
     \right],
   \end{align*}

        -

        where $I_h^n$ is the interpolation operator onto the finite element space used in time step $n$. This is not the optimal approach since it introduces an additional error besides time and space discretization, but it is a pragmatic one that makes it feasible to do time adapting meshes.

        +

        where $I_h^n$ is the interpolation operator onto the finite element space used in time step $n$. This is not the optimal approach since it introduces an additional error besides time and space discretization, but it is a pragmatic one that makes it feasible to do time adapting meshes.

      What could possibly go wrong? Verifying whether the code is correct

      There are a number of things one can typically get wrong when implementing a finite element code. In particular, for time dependent problems, the following are common sources of bugs:

        -
      • The time integration, for example by getting the coefficients in front of the terms involving the current and previous time steps wrong (e.g., mixing up a factor $\theta$ for $1-\theta$).
      • -
      • Handling the right hand side, for example forgetting a factor of $k_n$ or $\theta$.
      • -
      • Mishandling the boundary values, again for example forgetting a factor of $k_n$ or $\theta$, or forgetting to apply nonzero boundary values not only to the right hand side but also to the system matrix.
      • +
      • The time integration, for example by getting the coefficients in front of the terms involving the current and previous time steps wrong (e.g., mixing up a factor $\theta$ for $1-\theta$).
      • +
      • Handling the right hand side, for example forgetting a factor of $k_n$ or $\theta$.
      • +
      • Mishandling the boundary values, again for example forgetting a factor of $k_n$ or $\theta$, or forgetting to apply nonzero boundary values not only to the right hand side but also to the system matrix.

      A less common problem is getting the initial conditions wrong because one can typically see that it is wrong by just outputting the first time step. In any case, in order to verify the correctness of the code, it is helpful to have a testing protocol that allows us to verify each of these components separately. This means:

      • Testing the code with nonzero initial conditions but zero right hand side and boundary values and verifying that the time evolution is correct.
      • @@ -339,7 +339,7 @@ \end{align*}" src="form_3637.png"/>

        In other words, if the initial condition is a product of sines, then the solution has exactly the same shape of a product of sines that decays to zero with a known time dependence. This is something that is easy to test if you have a sufficiently fine mesh and sufficiently small time step.

        -

        What is typically going to happen if you get the time integration scheme wrong (e.g., by having the wrong factors of $\theta$ or $k$ in front of the various terms) is that you don't get the right temporal behavior of the solution. Double check the various factors until you get the right behavior. You may also want to verify that the temporal decay rate (as determined, for example, by plotting the value of the solution at a fixed point) does not double or halve each time you double or halve the time step or mesh size. You know that it's not the handling of the boundary conditions or right hand side because these were both zero.

        +

        What is typically going to happen if you get the time integration scheme wrong (e.g., by having the wrong factors of $\theta$ or $k$ in front of the various terms) is that you don't get the right temporal behavior of the solution. Double check the various factors until you get the right behavior. You may also want to verify that the temporal decay rate (as determined, for example, by plotting the value of the solution at a fixed point) does not double or halve each time you double or halve the time step or mesh size. You know that it's not the handling of the boundary conditions or right hand side because these were both zero.

        If you have so verified that the time integrator is correct, take the situation where the right hand side is nonzero but the initial conditions are zero: $u_0(x,y)=0$ and $f(x,y,t)=\sin(n_x \pi x) \sin(n_x \pi y)$. Again,

        \begin{align*}
   \left(\frac{\partial}{\partial t} -\Delta\right)
@@ -362,7 +362,7 @@
   a(t) = \frac{1}{(n_x^2+n_y^2)\pi^2} \left[ 1 - e^{-(n_x^2+n_y^2)\pi^2 t} \right].
 \end{align*}

        -

        Again, if you have the wrong factors of $\theta$ or $k$ in front of the right hand side terms you will either not get the right temporal behavior of the solution, or it will converge to a maximum value other than $\frac{1}{(n_x^2+n_y^2)\pi^2}$.

        +

        Again, if you have the wrong factors of $\theta$ or $k$ in front of the right hand side terms you will either not get the right temporal behavior of the solution, or it will converge to a maximum value other than $\frac{1}{(n_x^2+n_y^2)\pi^2}$.

        Once we have verified that the time integration and right hand side handling are correct using this scheme, we can go on to verifying that we have the boundary values correct, using a very similar approach.

        The testcase

        Solving the heat equation on a simple domain with a simple right hand side almost always leads to solutions that are exceedingly boring, since they become very smooth very quickly and then do not move very much any more. Rather, we here solve the equation on the L-shaped domain with zero Dirichlet boundary values and zero initial conditions, but as right hand side we choose

        @@ -410,7 +410,7 @@ \right. \end{align*}" src="form_3646.png"/>

        -

        In other words, in every period of length $\tau$, the right hand side first flashes on in domain 1, then off completely, then on in domain 2, then off completely again. This pattern is probably best observed via the little animation of the solution shown in the results section.

        +

        In other words, in every period of length $\tau$, the right hand side first flashes on in domain 1, then off completely, then on in domain 2, then off completely again. This pattern is probably best observed via the little animation of the solution shown in the results section.

        If you interpret the heat equation as finding the spatially and temporally variable temperature distribution of a conducting solid, then the test case above corresponds to an L-shaped body where we keep the boundary at zero temperature, and heat alternatingly in two parts of the domain. While heating is in effect, the temperature rises in these places, after which it diffuses and diminishes again. The point of these initial conditions is that they provide us with a solution that has singularities both in time (when sources switch on and off) as well as time (at the reentrant corner as well as at the edges and corners of the regions where the source acts).

        The commented program

        The program starts with the usual include files, all of which you should have seen before by now:

        @@ -797,7 +797,7 @@
          system_rhs.add(-(1 - theta) * time_step, tmp);
         

        The second piece is to compute the contributions of the source terms. This corresponds to the term $k_n
-   \left[ (1-\theta)F^{n-1} + \theta F^n \right]$. The following code calls VectorTools::create_right_hand_side to compute the vectors $F$, where we set the time of the right hand side (source) function before we evaluate it. The result of this all ends up in the forcing_terms variable:

        + \left[ (1-\theta)F^{n-1} + \theta F^n \right]$" src="form_3649.png"/>. The following code calls VectorTools::create_right_hand_side to compute the vectors $F$, where we set the time of the right hand side (source) function before we evaluate it. The result of this all ends up in the forcing_terms variable:

          RightHandSide<dim> rhs_function;
          rhs_function.set_time(time);
        @@ -1006,27 +1006,27 @@

        There are two factors at play. First, there are some islands where cells have been refined but that are surrounded by non-refined cells (and there are probably also a few occasional coarsened islands). These are not terrible, as they most of the time do not affect the approximation quality of the mesh, but they also don't help because so many of their additional degrees of freedom are in fact constrained by hanging node constraints. That said, this is easy to fix: the Triangulation class takes an argument to its constructor indicating a level of "mesh smoothing". Passing one of many possible flags, this instructs the triangulation to refine some additional cells, or not to refine some cells, so that the resulting mesh does not have these artifacts.

        The second problem is more severe: the mesh appears to lag the solution. The underlying reason is that we only adapt the mesh once every fifth time step, and only allow for a single refinement in these cases. Whenever a source switches on, the solution had been very smooth in this area before and the mesh was consequently rather coarse. This implies that the next time step when we refine the mesh, we will get one refinement level more in this area, and five time steps later another level, etc. But this is not enough: first, we should refine immediately when a source switches on (after all, in the current context we at least know what the right hand side is), and we should allow for more than one refinement level. Of course, all of this can be done using deal.II, it just requires a bit of algorithmic thinking in how to make this work!

        Positivity preservation

        -

        To increase the accuracy and resolution of your simulation in time, one typically decreases the time step size $k_n$. If you start playing around with the time step in this particular example, you will notice that the solution becomes partly negative, if $k_n$ is below a certain threshold. This is not what we would expect to happen (in nature).

        +

        To increase the accuracy and resolution of your simulation in time, one typically decreases the time step size $k_n$. If you start playing around with the time step in this particular example, you will notice that the solution becomes partly negative, if $k_n$ is below a certain threshold. This is not what we would expect to happen (in nature).

        To get an idea of this behavior mathematically, let us consider a general, fully discrete problem:

        \begin{align*}
   A u^{n} = B u^{n-1}.
 \end{align*}

        -

        The general form of the $i$th equation then reads:

        +

        The general form of the $i$th equation then reads:

        \begin{align*}
   a_{ii} u^{n}_i &= b_{ii} u^{n-1}_i +
   \sum\limits_{j \in S_i} \left( b_{ij} u^{n-1}_j - a_{ij} u^{n}_j \right),
 \end{align*}

        -

        where $S_i$ is the set of degrees of freedom that DoF $i$ couples with (i.e., for which either the matrix $A$ or matrix $B$ has a nonzero entry at position $(i,j)$). If all coefficients fulfill the following conditions:

        +

        where $S_i$ is the set of degrees of freedom that DoF $i$ couples with (i.e., for which either the matrix $A$ or matrix $B$ has a nonzero entry at position $(i,j)$). If all coefficients fulfill the following conditions:

        \begin{align*}
   a_{ii} &> 0, & b_{ii} &\geq 0, & a_{ij} &\leq 0, & b_{ij} &\geq 0,
   &
   \forall j &\in S_i,
 \end{align*}

        -

        all solutions $u^{n}$ keep their sign from the previous ones $u^{n-1}$, and consequently from the initial values $u^0$. See e.g. Kuzmin, Hämäläinen for more information on positivity preservation.

        -

        Depending on the PDE to solve and the time integration scheme used, one is able to deduce conditions for the time step $k_n$. For the heat equation with the Crank-Nicolson scheme, Schatz et. al. have translated it to the following ones:

        +

        all solutions $u^{n}$ keep their sign from the previous ones $u^{n-1}$, and consequently from the initial values $u^0$. See e.g. Kuzmin, Hämäläinen for more information on positivity preservation.

        +

        Depending on the PDE to solve and the time integration scheme used, one is able to deduce conditions for the time step $k_n$. For the heat equation with the Crank-Nicolson scheme, Schatz et. al. have translated it to the following ones:

        \begin{align*}
   (1 - \theta) k a_{ii} &\leq m_{ii},\qquad \forall i,
   &
/usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html	2024-03-17 21:57:44.839242921 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html	2024-03-17 21:57:44.843242945 +0000
@@ -146,7 +146,7 @@
 </ol> </td> </tr> </table>
  <a class=

        Introduction

        This tutorial program attempts to show how to use $hp$-finite element methods with deal.II. It solves the Laplace equation and so builds only on the first few tutorial programs, in particular on step-4 for dimension independent programming and step-6 for adaptive mesh refinement.

        -

        The $hp$-finite element method was proposed in the early 1980s by Babuška and Guo as an alternative to either (i) mesh refinement (i.e., decreasing the mesh parameter $h$ in a finite element computation) or (ii) increasing the polynomial degree $p$ used for shape functions. It is based on the observation that increasing the polynomial degree of the shape functions reduces the approximation error if the solution is sufficiently smooth. On the other hand, it is well known that even for the generally well-behaved class of elliptic problems, higher degrees of regularity can not be guaranteed in the vicinity of boundaries, corners, or where coefficients are discontinuous; consequently, the approximation can not be improved in these areas by increasing the polynomial degree $p$ but only by refining the mesh, i.e., by reducing the mesh size $h$. These differing means to reduce the error have led to the notion of $hp$-finite elements, where the approximating finite element spaces are adapted to have a high polynomial degree $p$ wherever the solution is sufficiently smooth, while the mesh width $h$ is reduced at places wherever the solution lacks regularity. It was already realized in the first papers on this method that $hp$-finite elements can be a powerful tool that can guarantee that the error is reduced not only with some negative power of the number of degrees of freedom, but in fact exponentially.

        +

        The $hp$-finite element method was proposed in the early 1980s by Babuška and Guo as an alternative to either (i) mesh refinement (i.e., decreasing the mesh parameter $h$ in a finite element computation) or (ii) increasing the polynomial degree $p$ used for shape functions. It is based on the observation that increasing the polynomial degree of the shape functions reduces the approximation error if the solution is sufficiently smooth. On the other hand, it is well known that even for the generally well-behaved class of elliptic problems, higher degrees of regularity can not be guaranteed in the vicinity of boundaries, corners, or where coefficients are discontinuous; consequently, the approximation can not be improved in these areas by increasing the polynomial degree $p$ but only by refining the mesh, i.e., by reducing the mesh size $h$. These differing means to reduce the error have led to the notion of $hp$-finite elements, where the approximating finite element spaces are adapted to have a high polynomial degree $p$ wherever the solution is sufficiently smooth, while the mesh width $h$ is reduced at places wherever the solution lacks regularity. It was already realized in the first papers on this method that $hp$-finite elements can be a powerful tool that can guarantee that the error is reduced not only with some negative power of the number of degrees of freedom, but in fact exponentially.

        In order to implement this method, we need several things above and beyond what a usual finite element program needs, and in particular above what we have introduced in the tutorial programs leading up to step-6. In particular, we will have to discuss the following aspects:

        • Instead of using the same finite element on all cells, we now will want a collection of finite element objects, and associate each cell with one of these objects in this collection.

          @@ -223,10 +223,10 @@

          One of the central pieces of the adaptive finite element method is that we inspect the computed solution (a posteriori) with an indicator that tells us which are the cells where the error is largest, and then refine them. In many of the other tutorial programs, we use the KellyErrorEstimator class to get an indication of the size of the error on a cell, although we also discuss more complicated strategies in some programs, most importantly in step-14.

          In any case, as long as the decision is only "refine this cell" or "do not refine this cell", the actual refinement step is not particularly challenging. However, here we have a code that is capable of hp-refinement, i.e., we suddenly have two choices whenever we detect that the error on a certain cell is too large for our liking: we can refine the cell by splitting it into several smaller ones, or we can increase the polynomial degree of the shape functions used on it. How do we know which is the more promising strategy? Answering this question is the central problem in $hp$-finite element research at the time of this writing.

          -

          In short, the question does not appear to be settled in the literature at this time. There are a number of more or less complicated schemes that address it, but there is nothing like the KellyErrorEstimator that is universally accepted as a good, even if not optimal, indicator of the error. Most proposals use the fact that it is beneficial to increase the polynomial degree whenever the solution is locally smooth whereas it is better to refine the mesh wherever it is rough. However, the questions of how to determine the local smoothness of the solution as well as the decision when a solution is smooth enough to allow for an increase in $p$ are certainly big and important ones.

          +

          In short, the question does not appear to be settled in the literature at this time. There are a number of more or less complicated schemes that address it, but there is nothing like the KellyErrorEstimator that is universally accepted as a good, even if not optimal, indicator of the error. Most proposals use the fact that it is beneficial to increase the polynomial degree whenever the solution is locally smooth whereas it is better to refine the mesh wherever it is rough. However, the questions of how to determine the local smoothness of the solution as well as the decision when a solution is smooth enough to allow for an increase in $p$ are certainly big and important ones.

          In the following, we propose a simple estimator of the local smoothness of a solution. As we will see in the results section, this estimator has flaws, in particular as far as cells with local hanging nodes are concerned. We therefore do not intend to present the following ideas as a complete solution to the problem. Rather, it is intended as an idea to approach it that merits further research and investigation. In other words, we do not intend to enter a sophisticated proposal into the fray about answers to the general question. However, to demonstrate our approach to $hp$-finite elements, we need a simple indicator that does generate some useful information that is able to drive the simple calculations this tutorial program will perform.

          The idea

          -

          Our approach here is simple: for a function $u({\bf x})$ to be in the Sobolev space $H^s(K)$ on a cell $K$, it has to satisfy the condition

          +

          Our approach here is simple: for a function $u({\bf x})$ to be in the Sobolev space $H^s(K)$ on a cell $K$, it has to satisfy the condition

          \[
    \int_K |\nabla^s u({\bf x})|^2 \; d{\bf x} < \infty.
 \] @@ -273,7 +273,7 @@ \]" src="form_3676.png"/>

          Put differently: the higher regularity $s$ we want, the faster the Fourier coefficients have to go to zero. If you wonder where the additional exponent $\frac{d-1}2$ comes from: we would like to make use of the fact that $\sum_l a_l < \infty$ if the sequence $a_l =
-{\cal O}(l^{-1-\epsilon})$ for any $\epsilon>0$. The problem is that we here have a summation not only over a single variable, but over all the integer multiples of $2\pi$ that are located inside the $d$-dimensional sphere, because we have vector components $k_x, k_y,
+{\cal O}(l^{-1-\epsilon})$ for any $\epsilon>0$. The problem is that we here have a summation not only over a single variable, but over all the integer multiples of $2\pi$ that are located inside the $d$-dimensional sphere, because we have vector components $k_x, k_y,
 \ldots$. In the same way as we prove that the sequence $a_l$ above converges by replacing the sum by an integral over the entire line, we can replace our $d$-dimensional sum by an integral over $d$-dimensional space. Now we have to note that between distance $|{\bf k}|$ and $|{\bf k}|+d|{\bf k}|$, there are, up to a constant, $|{\bf k}|^{d-1}$ modes, in much the same way as we can transform the volume element $dx\;dy$ into $2\pi r\; dr$. Consequently, it is no longer $|{\bf k}|^{2s}|\hat
 U_{\bf k}|^2$ that has to decay as ${\cal O}(|{\bf k}|^{-1-\epsilon})$, but it is in fact $|{\bf k}|^{2s}|\hat U_{\bf k}|^2 |{\bf k}|^{d-1}$. A comparison of exponents yields the result.

          We can turn this around: Assume we are given a function $\hat u$ of unknown smoothness. Let us compute its Fourier coefficients $\hat U_{\bf k}$ and see how fast they decay. If they decay as

          @@ -283,7 +283,7 @@

          then consequently the function we had here was in $H^{\mu-d/2}$.

          What we have to do

          -

          So what do we have to do to estimate the local smoothness of $u({\bf x})$ on a cell $K$? Clearly, the first step is to compute the Fourier coefficients of our solution. Fourier series being infinite series, we simplify our task by only computing the first few terms of the series, such that $|{\bf k}|\le 2\pi N$ with a cut-off $N$. Let us parenthetically remark that we want to choose $N$ large enough so that we capture at least the variation of those shape functions that vary the most. On the other hand, we should not choose $N$ too large: clearly, a finite element function, being a polynomial, is in $C^\infty$ on any given cell, so the coefficients will have to decay exponentially at one point; since we want to estimate the smoothness of the function this polynomial approximates, not of the polynomial itself, we need to choose a reasonable cutoff for $N$. Either way, computing this series is not particularly hard: from the definition

          +

          So what do we have to do to estimate the local smoothness of $u({\bf x})$ on a cell $K$? Clearly, the first step is to compute the Fourier coefficients of our solution. Fourier series being infinite series, we simplify our task by only computing the first few terms of the series, such that $|{\bf k}|\le 2\pi N$ with a cut-off $N$. Let us parenthetically remark that we want to choose $N$ large enough so that we capture at least the variation of those shape functions that vary the most. On the other hand, we should not choose $N$ too large: clearly, a finite element function, being a polynomial, is in $C^\infty$ on any given cell, so the coefficients will have to decay exponentially at one point; since we want to estimate the smoothness of the function this polynomial approximates, not of the polynomial itself, we need to choose a reasonable cutoff for $N$. Either way, computing this series is not particularly hard: from the definition

          \[
    \hat U_{\bf k}
    = \int_{\hat K} e^{i {\bf k}\cdot \hat{\bf x}} \hat u(\hat{\bf x}) d\hat{\bf x}
@@ -298,7 +298,7 @@
    d\hat{\bf x} \right] u_i,
 \]

          -

          where $u_i$ is the value of the $i$th degree of freedom on this cell. In other words, we can write it as a matrix-vector product

          +

          where $u_i$ is the value of the $i$th degree of freedom on this cell. In other words, we can write it as a matrix-vector product

          \[
    \hat U_{\bf k}
    = {\cal F}_{{\bf k},j} u_j,
@@ -311,7 +311,7 @@
    \int_{\hat K} e^{i {\bf k}\cdot \hat{\bf x}} \hat \varphi_j(\hat{\bf x}) d\hat{\bf x}.
 \]

          -

          This matrix is easily computed for a given number of shape functions $\varphi_j$ and Fourier modes $N$. Consequently, finding the coefficients $\hat U_{\bf k}$ is a rather trivial job. To simplify our life even further, we will use FESeries::Fourier class which does exactly this.

          +

          This matrix is easily computed for a given number of shape functions $\varphi_j$ and Fourier modes $N$. Consequently, finding the coefficients $\hat U_{\bf k}$ is a rather trivial job. To simplify our life even further, we will use FESeries::Fourier class which does exactly this.

          The next task is that we have to estimate how fast these coefficients decay with $|{\bf k}|$. The problem is that, of course, we have only finitely many of these coefficients in the first place. In other words, the best we can do is to fit a function $\alpha |{\bf k}|^{-\mu}$ to our data points $\hat U_{\bf k}$, for example by determining $\alpha,\mu$ via a least-squares procedure:

          \[
    \min_{\alpha,\mu}
@@ -335,7 +335,7 @@
    \left( \ln |\hat U_{\bf k}| - \beta + \mu \ln |{\bf k}|\right)^2,
 \]

          -

          where $\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0,
+<p> where <picture><source srcset=$\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0,
 \frac{\partial Q}{\partial\mu}=0$, are linear in $\beta,\mu$. We can write these conditions as follows:

          \[
    \left(\begin{array}{cc}
@@ -394,11 +394,11 @@
    }.
 \]

          -

          This is nothing else but linear regression fit and to do that we will use FESeries::linear_regression(). While we are not particularly interested in the actual value of $\beta$, the formula above gives us a mean to calculate the value of the exponent $\mu$ that we can then use to determine that $\hat u(\hat{\bf x})$ is in $H^s(\hat K)$ with $s=\mu-\frac d2$.

          +

          This is nothing else but linear regression fit and to do that we will use FESeries::linear_regression(). While we are not particularly interested in the actual value of $\beta$, the formula above gives us a mean to calculate the value of the exponent $\mu$ that we can then use to determine that $\hat u(\hat{\bf x})$ is in $H^s(\hat K)$ with $s=\mu-\frac d2$.

          These steps outlined above are applicable to many different scenarios, which motivated the introduction of a generic function SmoothnessEstimator::Fourier::coefficient_decay() in deal.II, that combines all the tasks described in this section in one simple function call. We will use it in the implementation of this program.

          Compensating for anisotropy

          In the formulas above, we have derived the Fourier coefficients $\hat U_{\bf
-k}$. Because ${\bf k}$ is a vector, we will get a number of Fourier coefficients $\hat U_{{\bf k}}$ for the same absolute value $|{\bf k}|$, corresponding to the Fourier transform in different directions. If we now consider a function like $|x|y^2$ then we will find lots of large Fourier coefficients in $x$-direction because the function is non-smooth in this direction, but fast-decaying Fourier coefficients in $y$-direction because the function is smooth there. The question that arises is this: if we simply fit our polynomial decay $\alpha |{\bf k}|^\mu$ to all Fourier coefficients, we will fit it to a smoothness averaged in all spatial directions. Is this what we want? Or would it be better to only consider the largest coefficient $\hat U_{{\bf k}}$ for all ${\bf k}$ with the same magnitude, essentially trying to determine the smoothness of the solution in that spatial direction in which the solution appears to be roughest?

          +k}$" src="form_3710.png"/>. Because ${\bf k}$ is a vector, we will get a number of Fourier coefficients $\hat U_{{\bf k}}$ for the same absolute value $|{\bf k}|$, corresponding to the Fourier transform in different directions. If we now consider a function like $|x|y^2$ then we will find lots of large Fourier coefficients in $x$-direction because the function is non-smooth in this direction, but fast-decaying Fourier coefficients in $y$-direction because the function is smooth there. The question that arises is this: if we simply fit our polynomial decay $\alpha |{\bf k}|^\mu$ to all Fourier coefficients, we will fit it to a smoothness averaged in all spatial directions. Is this what we want? Or would it be better to only consider the largest coefficient $\hat U_{{\bf k}}$ for all ${\bf k}$ with the same magnitude, essentially trying to determine the smoothness of the solution in that spatial direction in which the solution appears to be roughest?

          One can probably argue for either case. The issue would be of more interest if deal.II had the ability to use anisotropic finite elements, i.e., ones that use different polynomial degrees in different spatial directions, as they would be able to exploit the directionally variable smoothness much better. Alas, this capability does not exist at the time of writing this tutorial program.

          Either way, because we only have isotopic finite element classes, we adopt the viewpoint that we should tailor the polynomial degree to the lowest amount of regularity, in order to keep numerical efforts low. Consequently, instead of using the formula

          \[
@@ -419,7 +419,7 @@
    }.
 \]

          -

          To calculate $\mu$ as shown above, we have to slightly modify all sums: instead of summing over all Fourier modes, we only sum over those for which the Fourier coefficient is the largest one among all $\hat U_{{\bf k}}$ with the same magnitude $|{\bf k}|$, i.e., all sums above have to replaced by the following sums:

          +

          To calculate $\mu$ as shown above, we have to slightly modify all sums: instead of summing over all Fourier modes, we only sum over those for which the Fourier coefficient is the largest one among all $\hat U_{{\bf k}}$ with the same magnitude $|{\bf k}|$, i.e., all sums above have to replaced by the following sums:

          \[
   \sum_{{\bf k}, |{\bf k}|\le N}
   \longrightarrow
@@ -429,14 +429,14 @@
 </p>
 <p> This is the form we will implement in the program.</p>
 <p><a class=

          Questions about cell sizes

          -

          One may ask whether it is a problem that we only compute the Fourier transform on the reference cell (rather than the real cell) of the solution. After all, we stretch the solution by a factor $\frac 1h$ during the transformation, thereby shifting the Fourier frequencies by a factor of $h$. This is of particular concern since we may have neighboring cells with mesh sizes $h$ that differ by a factor of 2 if one of them is more refined than the other. The concern is also motivated by the fact that, as we will see in the results section below, the estimated smoothness of the solution should be a more or less continuous function, but exhibits jumps at locations where the mesh size jumps. It therefore seems natural to ask whether we have to compensate for the transformation.

          +

          One may ask whether it is a problem that we only compute the Fourier transform on the reference cell (rather than the real cell) of the solution. After all, we stretch the solution by a factor $\frac 1h$ during the transformation, thereby shifting the Fourier frequencies by a factor of $h$. This is of particular concern since we may have neighboring cells with mesh sizes $h$ that differ by a factor of 2 if one of them is more refined than the other. The concern is also motivated by the fact that, as we will see in the results section below, the estimated smoothness of the solution should be a more or less continuous function, but exhibits jumps at locations where the mesh size jumps. It therefore seems natural to ask whether we have to compensate for the transformation.

          The short answer is "no". In the process outlined above, we attempt to find coefficients $\beta,\mu$ that minimize the sum of squares of the terms

          \[
    \ln |\hat U_{{\bf k}}| - \beta + \mu \ln |{\bf k}|.
 \]

          -

          To compensate for the transformation means not attempting to fit a decay $|{\bf k}|^\mu$ with respect to the Fourier frequencies ${\bf k}$ on the unit cell, but to fit the coefficients $\hat U_{{\bf k}}$ computed on the reference cell to the Fourier frequencies on the real cell $|\bf
-k|h$, where $h$ is the norm of the transformation operator (i.e., something like the diameter of the cell). In other words, we would have to minimize the sum of squares of the terms

          +

          To compensate for the transformation means not attempting to fit a decay $|{\bf k}|^\mu$ with respect to the Fourier frequencies ${\bf k}$ on the unit cell, but to fit the coefficients $\hat U_{{\bf k}}$ computed on the reference cell to the Fourier frequencies on the real cell $|\bf
+k|h$, where $h$ is the norm of the transformation operator (i.e., something like the diameter of the cell). In other words, we would have to minimize the sum of squares of the terms

          \[
    \ln |\hat U_{{\bf k}}| - \beta + \mu \ln (|{\bf k}|h).
 \] @@ -446,7 +446,7 @@ \ln |\hat U_{{\bf k}}| - (\beta - \mu \ln h) + \mu \ln (|{\bf k}|). \]" src="form_3720.png"/>

          -

          In other words, this and the original least squares problem will produce the same best-fit exponent $\mu$, though the offset will in one case be $\beta$ and in the other $\beta-\mu \ln h$. However, since we are not interested in the offset at all but only in the exponent, it doesn't matter whether we scale Fourier frequencies in order to account for mesh size effects or not, the estimated smoothness exponent will be the same in either case.

          +

          In other words, this and the original least squares problem will produce the same best-fit exponent $\mu$, though the offset will in one case be $\beta$ and in the other $\beta-\mu \ln h$. However, since we are not interested in the offset at all but only in the exponent, it doesn't matter whether we scale Fourier frequencies in order to account for mesh size effects or not, the estimated smoothness exponent will be the same in either case.

          Complications with linear systems for hp-discretizations

          Creating the sparsity pattern

          One of the problems with $hp$-methods is that the high polynomial degree of shape functions together with the large number of constrained degrees of freedom leads to matrices with large numbers of nonzero entries in some rows. At the same time, because there are areas where we use low polynomial degree and consequently matrix rows with relatively few nonzero entries. Consequently, allocating the sparsity pattern for these matrices is a challenge: we cannot simply assemble a SparsityPattern by starting with an estimate of the bandwidth without using a lot of extra memory.

          @@ -460,7 +460,7 @@

          The early tutorial programs use first or second degree finite elements, so removing entries in the sparsity pattern corresponding to constrained degrees of freedom does not have a large impact on the overall number of zeros explicitly stored by the matrix. However, since as many as a third of the degrees of freedom may be constrained in an hp-discretization (and, with higher degree elements, these constraints can couple one DoF to as many as ten or twenty other DoFs), it is worthwhile to take these constraints into consideration since the resulting matrix will be much sparser (and, therefore, matrix-vector products or factorizations will be substantially faster too).

          Eliminating constrained degrees of freedom

          A second problem particular to $hp$-methods arises because we have so many constrained degrees of freedom: typically up to about one third of all degrees of freedom (in 3d) are constrained because they either belong to cells with hanging nodes or because they are on cells adjacent to cells with a higher or lower polynomial degree. This is, in fact, not much more than the fraction of constrained degrees of freedom in non- $hp$-mode, but the difference is that each constrained hanging node is constrained not only against the two adjacent degrees of freedom, but is constrained against many more degrees of freedom.

          -

          It turns out that the strategy presented first in step-6 to eliminate the constraints while computing the element matrices and vectors with AffineConstraints::distribute_local_to_global is the most efficient approach also for this case. The alternative strategy to first build the matrix without constraints and then "condensing" away constrained degrees of freedom is considerably more expensive. It turns out that building the sparsity pattern by this inefficient algorithm requires at least ${\cal O}(N \log N)$ in the number of unknowns, whereas an ideal finite element program would of course only have algorithms that are linear in the number of unknowns. Timing the sparsity pattern creation as well as the matrix assembly shows that the algorithm presented in step-6 (and used in the code below) is indeed faster.

          +

          It turns out that the strategy presented first in step-6 to eliminate the constraints while computing the element matrices and vectors with AffineConstraints::distribute_local_to_global is the most efficient approach also for this case. The alternative strategy to first build the matrix without constraints and then "condensing" away constrained degrees of freedom is considerably more expensive. It turns out that building the sparsity pattern by this inefficient algorithm requires at least ${\cal O}(N \log N)$ in the number of unknowns, whereas an ideal finite element program would of course only have algorithms that are linear in the number of unknowns. Timing the sparsity pattern creation as well as the matrix assembly shows that the algorithm presented in step-6 (and used in the code below) is indeed faster.

          In our program, we will also treat the boundary conditions as (possibly inhomogeneous) constraints and eliminate the matrix rows and columns to those as well. All we have to do for this is to call the function that interpolates the Dirichlet boundary conditions already in the setup phase in order to tell the AffineConstraints object about them, and then do the transfer from local to global data on matrix and vector simultaneously. This is exactly what we've shown in step-6.

          The test case

          The test case we will solve with this program is a re-take of the one we already look at in step-14: we solve the Laplace equation

          @@ -468,7 +468,7 @@ -\Delta u = f \]" src="form_3722.png"/>

          -

          in 2d, with $f=(x+1)(y+1)$, and with zero Dirichlet boundary values for $u$. We do so on the domain $[-1,1]^2\backslash[-\frac 12,\frac 12]^2$, i.e., a square with a square hole in the middle.

          +

          in 2d, with $f=(x+1)(y+1)$, and with zero Dirichlet boundary values for $u$. We do so on the domain $[-1,1]^2\backslash[-\frac 12,\frac 12]^2$, i.e., a square with a square hole in the middle.

          The difference to step-14 is of course that we use $hp$-finite elements for the solution. The test case is of interest because it has re-entrant corners in the corners of the hole, at which the solution has singularities. We therefore expect that the solution will be smooth in the interior of the domain, and rough in the vicinity of the singularities. The hope is that our refinement and smoothness indicators will be able to see this behavior and refine the mesh close to the singularities, while the polynomial degree is increased away from it. As we will see in the results section, this is indeed the case.

          The commented program

          Include files

          @@ -710,7 +710,7 @@

        LaplaceProblem::solve

        -

        The function solving the linear system is entirely unchanged from previous examples. We simply try to reduce the initial residual (which equals the $l_2$ norm of the right hand side) by a certain factor:

        +

        The function solving the linear system is entirely unchanged from previous examples. We simply try to reduce the initial residual (which equals the $l_2$ norm of the right hand side) by a certain factor:

          template <int dim>
          void LaplaceProblem<dim>::solve()
          {
        @@ -734,7 +734,7 @@

        LaplaceProblem::postprocess

        -

        After solving the linear system, we will want to postprocess the solution. Here, all we do is to estimate the error, estimate the local smoothness of the solution as described in the introduction, then write graphical output, and finally refine the mesh in both $h$ and $p$ according to the indicators computed before. We do all this in the same function because we want the estimated error and smoothness indicators not only for refinement, but also include them in the graphical output.

        +

        After solving the linear system, we will want to postprocess the solution. Here, all we do is to estimate the error, estimate the local smoothness of the solution as described in the introduction, then write graphical output, and finally refine the mesh in both $h$ and $p$ according to the indicators computed before. We do all this in the same function because we want the estimated error and smoothness indicators not only for refinement, but also include them in the graphical output.

          template <int dim>
          void LaplaceProblem<dim>::postprocess(const unsigned int cycle)
          {
        @@ -789,7 +789,7 @@
          }
         
        std::string int_to_string(const unsigned int value, const unsigned int digits=numbers::invalid_unsigned_int)
        Definition utilities.cc:471
        -

        After this, we would like to actually refine the mesh, in both $h$ and $p$. The way we are going to do this is as follows: first, we use the estimated error to flag those cells for refinement that have the largest error. This is what we have always done:

        +

        After this, we would like to actually refine the mesh, in both $h$ and $p$. The way we are going to do this is as follows: first, we use the estimated error to flag those cells for refinement that have the largest error. This is what we have always done:

          {
          estimated_error_per_cell,
        @@ -797,12 +797,12 @@
          0.03);
         
        void refine_and_coarsen_fixed_number(Triangulation< dim, spacedim > &triangulation, const Vector< Number > &criteria, const double top_fraction_of_cells, const double bottom_fraction_of_cells, const unsigned int max_n_cells=std::numeric_limits< unsigned int >::max())
        -

        Next we would like to figure out which of the cells that have been flagged for refinement should actually have $p$ increased instead of $h$ decreased. The strategy we choose here is that we look at the smoothness indicators of those cells that are flagged for refinement, and increase $p$ for those with a smoothness larger than a certain relative threshold. In other words, for every cell for which (i) the refinement flag is set, (ii) the smoothness indicator is larger than the threshold, and (iii) we still have a finite element with a polynomial degree higher than the current one in the finite element collection, we will assign a future FE index that corresponds to a polynomial with degree one higher than it currently is. The following function is capable of doing exactly this. Absent any better strategies, we will set the threshold via interpolation between the minimal and maximal smoothness indicators on cells flagged for refinement. Since the corner singularities are strongly localized, we will favor $p$- over $h$-refinement quantitatively. We achieve this with a low threshold by setting a small interpolation factor of 0.2. In the same way, we deal with cells that are going to be coarsened and decrease their polynomial degree when their smoothness indicator is below the corresponding threshold determined on cells to be coarsened.

        +

        Next we would like to figure out which of the cells that have been flagged for refinement should actually have $p$ increased instead of $h$ decreased. The strategy we choose here is that we look at the smoothness indicators of those cells that are flagged for refinement, and increase $p$ for those with a smoothness larger than a certain relative threshold. In other words, for every cell for which (i) the refinement flag is set, (ii) the smoothness indicator is larger than the threshold, and (iii) we still have a finite element with a polynomial degree higher than the current one in the finite element collection, we will assign a future FE index that corresponds to a polynomial with degree one higher than it currently is. The following function is capable of doing exactly this. Absent any better strategies, we will set the threshold via interpolation between the minimal and maximal smoothness indicators on cells flagged for refinement. Since the corner singularities are strongly localized, we will favor $p$- over $h$-refinement quantitatively. We achieve this with a low threshold by setting a small interpolation factor of 0.2. In the same way, we deal with cells that are going to be coarsened and decrease their polynomial degree when their smoothness indicator is below the corresponding threshold determined on cells to be coarsened.

          dof_handler, smoothness_indicators, 0.2, 0.2);
         
        void p_adaptivity_from_relative_threshold(const DoFHandler< dim, spacedim > &dof_handler, const Vector< Number > &criteria, const double p_refine_fraction=0.5, const double p_coarsen_fraction=0.5, const ComparisonFunction< std_cxx20::type_identity_t< Number > > &compare_refine=std::greater_equal< Number >(), const ComparisonFunction< std_cxx20::type_identity_t< Number > > &compare_coarsen=std::less_equal< Number >())
        -

        The above function only determines whether the polynomial degree will change via future FE indices, but does not manipulate the $h$-refinement flags. So for cells that are flagged for both refinement categories, we prefer $p$- over $h$-refinement. The following function call ensures that only one of $p$- or $h$-refinement is imposed, and not both at once.

        +

        The above function only determines whether the polynomial degree will change via future FE indices, but does not manipulate the $h$-refinement flags. So for cells that are flagged for both refinement categories, we prefer $p$- over $h$-refinement. The following function call ensures that only one of $p$- or $h$-refinement is imposed, and not both at once.

         
        void choose_p_over_h(const DoFHandler< dim, spacedim > &dof_handler)
        @@ -961,7 +961,7 @@

        The bigger question is, of course, how to avoid this problem. Possibilities include estimating the smoothness not on single cells, but cell assemblies or patches surrounding each cell. It may also be possible to find simple correction factors for each cell depending on the number of constrained degrees of freedom it has. In either case, there are ample opportunities for further research on finding good $hp$-refinement criteria. On the other hand, the main point of the current program was to demonstrate using the $hp$-technology in deal.II, which is unaffected by our use of a possible sub-optimal refinement criterion.

        Possibilities for extensions

        Different hp-decision strategies

        -

        This tutorial demonstrates only one particular strategy to decide between $h$- and $p$-adaptation. In fact, there are many more ways to automatically decide on the adaptation type, of which a few are already implemented in deal.II:

          +

          This tutorial demonstrates only one particular strategy to decide between $h$- and $p$-adaptation. In fact, there are many more ways to automatically decide on the adaptation type, of which a few are already implemented in deal.II:

          • Fourier coefficient decay: This is the strategy currently implemented in this tutorial. For more information on this strategy, see the general documentation of the SmoothnessEstimator::Fourier namespace.

            @@ -972,12 +972,12 @@

          • -

            Refinement history: The last strategy is quite different from the other two. In theory, we know how the error will converge after changing the discretization of the function space. With $h$-refinement the solution converges algebraically as already pointed out in step-7. If the solution is sufficiently smooth, though, we expect that the solution will converge exponentially with increasing polynomial degree of the finite element. We can compare a proper prediction of the error with the actual error in the following step to see if our choice of adaptation type was justified.

            -

            The transition to this strategy is a bit more complicated. For this, we need an initialization step with pure $h$- or $p$-refinement and we need to transfer the predicted errors over adapted meshes. The extensive documentation of the hp::Refinement::predict_error() function describes not only the theoretical details of this approach, but also presents a blueprint on how to implement this strategy in your code. For more information, see [melenk2001hp] .

            +

            Refinement history: The last strategy is quite different from the other two. In theory, we know how the error will converge after changing the discretization of the function space. With $h$-refinement the solution converges algebraically as already pointed out in step-7. If the solution is sufficiently smooth, though, we expect that the solution will converge exponentially with increasing polynomial degree of the finite element. We can compare a proper prediction of the error with the actual error in the following step to see if our choice of adaptation type was justified.

            +

            The transition to this strategy is a bit more complicated. For this, we need an initialization step with pure $h$- or $p$-refinement and we need to transfer the predicted errors over adapted meshes. The extensive documentation of the hp::Refinement::predict_error() function describes not only the theoretical details of this approach, but also presents a blueprint on how to implement this strategy in your code. For more information, see [melenk2001hp] .

            Note that with this particular function you cannot predict the error for the next time step in time-dependent problems. Therefore, this strategy cannot be applied to this type of problem without further ado. Alternatively, the following approach could be used, which works for all the other strategies as well: start each time step with a coarse mesh, keep refining until happy with the result, and only then move on to the next time step.

          -

          Try implementing one of these strategies into this tutorial and observe the subtle changes to the results. You will notice that all strategies are capable of identifying the singularities near the reentrant corners and will perform $h$-refinement in these regions, while preferring $p$-refinement in the bulk domain. A detailed comparison of these strategies is presented in [fehling2020] .

          /usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html 2024-03-17 21:57:44.935243514 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html 2024-03-17 21:57:44.939243539 +0000 @@ -174,8 +174,8 @@

          Introduction

          In this example, we intend to solve the multigroup diffusion approximation of the neutron transport equation. Essentially, the way to view this is as follows: In a nuclear reactor, neutrons are speeding around at different energies, get absorbed or scattered, or start a new fission event. If viewed at long enough length scales, the movement of neutrons can be considered a diffusion process.

          -

          A mathematical description of this would group neutrons into energy bins, and consider the balance equations for the neutron fluxes in each of these bins, or energy groups. The scattering, absorption, and fission events would then be operators within the diffusion equation describing the neutron fluxes. Assume we have energy groups $g=1,\ldots,G$, where by convention we assume that the neutrons with the highest energy are in group 1 and those with the lowest energy in group $G$. Then the neutron flux of each group satisfies the following equations:

          -\begin{eqnarray*}
+<p>A mathematical description of this would group neutrons into energy bins, and consider the balance equations for the neutron fluxes in each of these bins, or energy groups. The scattering, absorption, and fission events would then be operators within the diffusion equation describing the neutron fluxes. Assume we have energy groups <picture><source srcset=$g=1,\ldots,G$, where by convention we assume that the neutrons with the highest energy are in group 1 and those with the lowest energy in group $G$. Then the neutron flux of each group satisfies the following equations:

          +\begin{eqnarray*}
 \frac 1{v_g}\frac{\partial \phi_g(x,t)}{\partial t}
 &=&
 \nabla \cdot(D_g(x) \nabla \phi_g(x,t))
@@ -189,25 +189,25 @@
 \sum_{g'\ne g}\Sigma_{s,g'\to g}(x)\phi_{g'}(x,t)
 +
 s_{\mathrm{ext},g}(x,t)
-\end{eqnarray*} +\end{eqnarray*}" src="form_3729.png"/>

          -

          augmented by appropriate boundary conditions. Here, $v_g$ is the velocity of neutrons within group $g$. In other words, the change in time in flux of neutrons in group $g$ is governed by the following processes:

            +

            augmented by appropriate boundary conditions. Here, $v_g$ is the velocity of neutrons within group $g$. In other words, the change in time in flux of neutrons in group $g$ is governed by the following processes:

            • -Diffusion $\nabla \cdot(D_g(x) \nabla \phi_g(x,t))$. Here, $D_g$ is the (spatially variable) diffusion coefficient.
            • +Diffusion $\nabla \cdot(D_g(x) \nabla \phi_g(x,t))$. Here, $D_g$ is the (spatially variable) diffusion coefficient.
            • -Absorption $\Sigma_{r,g}(x)\phi_g(x,t)$ (note the negative sign). The coefficient $\Sigma_{r,g}$ is called the removal cross section.
            • +Absorption $\Sigma_{r,g}(x)\phi_g(x,t)$ (note the negative sign). The coefficient $\Sigma_{r,g}$ is called the removal cross section.
            • -Nuclear fission $\chi_g\sum_{g'=1}^G\nu\Sigma_{f,g'}(x)\phi_{g'}(x,t)$. The production of neutrons of energy $g$ is proportional to the flux of neutrons of energy $g'$ times the probability $\Sigma_{f,g'}$ that neutrons of energy $g'$ cause a fission event times the number $\nu$ of neutrons produced in each fission event times the probability that a neutron produced in this event has energy $g$. $\nu\Sigma_{f,g'}$ is called the fission cross section and $\chi_g$ the fission spectrum. We will denote the term $\chi_g\nu\Sigma_{f,g'}$ as the fission distribution cross section in the program.
            • +Nuclear fission $\chi_g\sum_{g'=1}^G\nu\Sigma_{f,g'}(x)\phi_{g'}(x,t)$. The production of neutrons of energy $g$ is proportional to the flux of neutrons of energy $g'$ times the probability $\Sigma_{f,g'}$ that neutrons of energy $g'$ cause a fission event times the number $\nu$ of neutrons produced in each fission event times the probability that a neutron produced in this event has energy $g$. $\nu\Sigma_{f,g'}$ is called the fission cross section and $\chi_g$ the fission spectrum. We will denote the term $\chi_g\nu\Sigma_{f,g'}$ as the fission distribution cross section in the program.
            • -Scattering $\sum_{g'\ne g}\Sigma_{s,g'\to g}(x)\phi_{g'}(x,t)$ of neutrons of energy $g'$ producing neutrons of energy $g$. $\Sigma_{s,g'\to g}$ is called the scattering cross section. The case of elastic, in-group scattering $g'=g$ exists, too, but we subsume this into the removal cross section. The case $g'<g$ is called down-scattering, since a neutron loses energy in such an event. On the other hand, $g'>g$ corresponds to up-scattering: a neutron gains energy in a scattering event from the thermal motion of the atoms surrounding it; up-scattering is therefore only an important process for neutrons with kinetic energies that are already on the same order as the thermal kinetic energy (i.e. in the sub $eV$ range).
            • +Scattering $\sum_{g'\ne g}\Sigma_{s,g'\to g}(x)\phi_{g'}(x,t)$ of neutrons of energy $g'$ producing neutrons of energy $g$. $\Sigma_{s,g'\to g}$ is called the scattering cross section. The case of elastic, in-group scattering $g'=g$ exists, too, but we subsume this into the removal cross section. The case $g'<g$ is called down-scattering, since a neutron loses energy in such an event. On the other hand, $g'>g$ corresponds to up-scattering: a neutron gains energy in a scattering event from the thermal motion of the atoms surrounding it; up-scattering is therefore only an important process for neutrons with kinetic energies that are already on the same order as the thermal kinetic energy (i.e. in the sub $eV$ range).
            • -An extraneous source $s_{\mathrm{ext},g}$.
            • +An extraneous source $s_{\mathrm{ext},g}$.

            For realistic simulations in reactor analysis, one may want to split the continuous spectrum of neutron energies into many energy groups, often up to 100. However, if neutron energy spectra are known well enough for some type of reactor (for example Pressurized Water Reactors, PWR), it is possible to obtain satisfactory results with only 2 energy groups.

            -

            In the program shown in this tutorial program, we provide the structure to compute with as many energy groups as desired. However, to keep computing times moderate and in order to avoid tabulating hundreds of coefficients, we only provide the coefficients for above equations for a two-group simulation, i.e. $g=1,2$. We do, however, consider a realistic situation by assuming that the coefficients are not constant, but rather depend on the materials that are assembled into reactor fuel assemblies in rather complicated ways (see below).

            +

            In the program shown in this tutorial program, we provide the structure to compute with as many energy groups as desired. However, to keep computing times moderate and in order to avoid tabulating hundreds of coefficients, we only provide the coefficients for above equations for a two-group simulation, i.e. $g=1,2$. We do, however, consider a realistic situation by assuming that the coefficients are not constant, but rather depend on the materials that are assembled into reactor fuel assemblies in rather complicated ways (see below).

            The eigenvalue problem

            If we consider all energy groups at once, we may write above equations in the following operator form:

            -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \frac 1v \frac{\partial \phi}{\partial t}
 =
 -L\phi
@@ -217,64 +217,64 @@
 X\phi
 +
 s_{\mathrm{ext}},
-\end{eqnarray*} +\end{eqnarray*}" src="form_3749.png"/>

            -

            where $L,F,X$ are sinking, fission, and scattering operators, respectively. $L$ here includes both the diffusion and removal terms. Note that $L$ is symmetric, whereas $F$ and $X$ are not.

            -

            It is well known that this equation admits a stable solution if all eigenvalues of the operator $-L+F+X$ are negative. This can be readily seen by multiplying the equation by $\phi$ and integrating over the domain, leading to

            -\begin{eqnarray*}
+<p> where <picture><source srcset=$L,F,X$ are sinking, fission, and scattering operators, respectively. $L$ here includes both the diffusion and removal terms. Note that $L$ is symmetric, whereas $F$ and $X$ are not.

            +

            It is well known that this equation admits a stable solution if all eigenvalues of the operator $-L+F+X$ are negative. This can be readily seen by multiplying the equation by $\phi$ and integrating over the domain, leading to

            +\begin{eqnarray*}
   \frac 1{2v} \frac{\partial}{\partial t}  \|\phi\|^2 = ((-L+F+X)\phi,\phi).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3752.png"/>

            Stability means that the solution does not grow, i.e. we want the left hand side to be less than zero, which is the case if the eigenvalues of the operator on the right are all negative. For obvious reasons, it is not very desirable if a nuclear reactor produces neutron fluxes that grow exponentially, so eigenvalue analyses are the bread-and-butter of nuclear engineers. The main point of the program is therefore to consider the eigenvalue problem

            -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (L-F-X) \phi = \lambda \phi,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3753.png"/>

            -

            where we want to make sure that all eigenvalues are positive. Note that $L$, being the diffusion operator plus the absorption (removal), is positive definite; the condition that all eigenvalues are positive therefore means that we want to make sure that fission and inter-group scattering are weak enough to not shift the spectrum into the negative.

            -

            In nuclear engineering, one typically looks at a slightly different formulation of the eigenvalue problem. To this end, we do not just multiply with $\phi$ and integrate, but rather multiply with $\phi(L-X)^{-1}$. We then get the following evolution equation:

            -\begin{eqnarray*}
+<p> where we want to make sure that all eigenvalues are positive. Note that <picture><source srcset=$L$, being the diffusion operator plus the absorption (removal), is positive definite; the condition that all eigenvalues are positive therefore means that we want to make sure that fission and inter-group scattering are weak enough to not shift the spectrum into the negative.

            +

            In nuclear engineering, one typically looks at a slightly different formulation of the eigenvalue problem. To this end, we do not just multiply with $\phi$ and integrate, but rather multiply with $\phi(L-X)^{-1}$. We then get the following evolution equation:

            +\begin{eqnarray*}
   \frac 1{2v} \frac{\partial}{\partial t}  \|\phi\|^2_{(L-X)^{-1}} = ((L-X)^{-1}(-L+F+X)\phi,\phi).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3755.png"/>

            Stability is then guaranteed if the eigenvalues of the following problem are all negative:

            -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (L-X)^{-1}(-L+F+X)\phi = \lambda_F \phi,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3756.png"/>

            which is equivalent to the eigenvalue problem

            -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (L-X)\phi = \frac 1{\lambda_F+1} F \phi.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3757.png"/>

            The typical formulation in nuclear engineering is to write this as

            -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (L-X) \phi = \frac 1{k_{\mathrm{eff}}} F \phi,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3758.png"/>

            -

            where $k_{\mathrm{eff}}=\frac 1{\lambda^F+1}$. Intuitively, $k_{\mathrm{eff}}$ is something like the multiplication factor for neutrons per typical time scale and should be less than or equal to one for stable operation of a reactor: if it is less than one, the chain reaction will die down, whereas nuclear bombs for example have a $k$-eigenvalue larger than one. A stable reactor should have $k_{\mathrm{eff}}=1$.

            -

            For those who wonder how this can be achieved in practice without inadvertently getting slightly larger than one and triggering a nuclear bomb: first, fission processes happen on different time scales. While most neutrons are released very quickly after a fission event, a small number of neutrons are only released by daughter nuclei after several further decays, up to 10-60 seconds after the fission was initiated. If one is slightly beyond $k_{\mathrm{eff}}=1$, one therefore has many seconds to react until all the neutrons created in fission re-enter the fission cycle. Nevertheless, control rods in nuclear reactors absorbing neutrons – and therefore reducing $k_{\mathrm{eff}}$ – are designed in such a way that they are all the way in the reactor in at most 2 seconds.

            -

            One therefore has on the order of 10-60 seconds to regulate the nuclear reaction if $k_{\mathrm{eff}}$ should be larger than one for some time, as indicated by a growing neutron flux. Regulation can be achieved by continuously monitoring the neutron flux, and if necessary increase or reduce neutron flux by moving neutron-absorbing control rods a few millimeters into or out of the reactor. On a longer scale, the water cooling the reactor contains boron, a good neutron absorber. Every few hours, boron concentrations are adjusted by adding boron or diluting the coolant.

            +

            where $k_{\mathrm{eff}}=\frac 1{\lambda^F+1}$. Intuitively, $k_{\mathrm{eff}}$ is something like the multiplication factor for neutrons per typical time scale and should be less than or equal to one for stable operation of a reactor: if it is less than one, the chain reaction will die down, whereas nuclear bombs for example have a $k$-eigenvalue larger than one. A stable reactor should have $k_{\mathrm{eff}}=1$.

            +

            For those who wonder how this can be achieved in practice without inadvertently getting slightly larger than one and triggering a nuclear bomb: first, fission processes happen on different time scales. While most neutrons are released very quickly after a fission event, a small number of neutrons are only released by daughter nuclei after several further decays, up to 10-60 seconds after the fission was initiated. If one is slightly beyond $k_{\mathrm{eff}}=1$, one therefore has many seconds to react until all the neutrons created in fission re-enter the fission cycle. Nevertheless, control rods in nuclear reactors absorbing neutrons – and therefore reducing $k_{\mathrm{eff}}$ – are designed in such a way that they are all the way in the reactor in at most 2 seconds.

            +

            One therefore has on the order of 10-60 seconds to regulate the nuclear reaction if $k_{\mathrm{eff}}$ should be larger than one for some time, as indicated by a growing neutron flux. Regulation can be achieved by continuously monitoring the neutron flux, and if necessary increase or reduce neutron flux by moving neutron-absorbing control rods a few millimeters into or out of the reactor. On a longer scale, the water cooling the reactor contains boron, a good neutron absorber. Every few hours, boron concentrations are adjusted by adding boron or diluting the coolant.

            Finally, some of the absorption and scattering reactions have some stability built in; for example, higher neutron fluxes result in locally higher temperatures, which lowers the density of water and therefore reduces the number of scatterers that are necessary to moderate neutrons from high to low energies before they can start fission events themselves.

            -

            In this tutorial program, we solve above $k$-eigenvalue problem for two energy groups, and we are looking for the largest multiplication factor $k_{\mathrm{eff}}$, which is proportional to the inverse of the minimum eigenvalue plus one. To solve the eigenvalue problem, we generally use a modified version of the inverse power method. The algorithm looks like this:

            +

            In this tutorial program, we solve above $k$-eigenvalue problem for two energy groups, and we are looking for the largest multiplication factor $k_{\mathrm{eff}}$, which is proportional to the inverse of the minimum eigenvalue plus one. To solve the eigenvalue problem, we generally use a modified version of the inverse power method. The algorithm looks like this:

            1. -

              Initialize $\phi_g$ and $k_{\mathrm{eff}}$ with $\phi_g^{(0)}$ and $k_{\mathrm{eff}}^{(0)}$ and let $n=1$.

              +

              Initialize $\phi_g$ and $k_{\mathrm{eff}}$ with $\phi_g^{(0)}$ and $k_{\mathrm{eff}}^{(0)}$ and let $n=1$.

            2. Define the so-called fission source by

              -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
     s_f^{(n-1)}(x)
     =
     \frac{1}{k_{\mathrm{eff}}^{(n-1)}}
     \sum_{g'=1}^G\nu\Sigma_{f,g'}(x)\phi_{g'}^{(n-1)}(x).
-  \end{eqnarray*} + \end{eqnarray*}" src="form_3766.png"/>

            3. -

              Solve for all group fluxes $\phi_g,g=1,\ldots,G$ using

              -\begin{eqnarray*}
+<p class=Solve for all group fluxes $\phi_g,g=1,\ldots,G$ using

              +\begin{eqnarray*}
     -\nabla \cdot D_g\nabla \phi_g^{(n)}
     +
     \Sigma_{r,g}\phi_g^{(n)}
@@ -284,111 +284,111 @@
     \sum_{g'< g} \Sigma_{s,g'\to g} \phi_{g'}^{(n)}
     +
     \sum_{g'> g}\Sigma_{s,g'\to g}\phi_{g'}^{(n-1)}.
-  \end{eqnarray*} + \end{eqnarray*}" src="form_3768.png"/>

            4. Update

              -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
     k_{\mathrm{eff}}^{(n)}
     =
     \sum_{g'=1}^G
     \int_{\Omega}\nu\Sigma_{f,g'}(x)
     \phi_{g'}^{(n)}(x)dx.
-  \end{eqnarray*} + \end{eqnarray*}" src="form_3769.png"/>

            5. -Compare $k_{\mathrm{eff}}^{(n)}$ with $k_{\mathrm{eff}}^{(n-1)}$. If the change is greater than a prescribed tolerance, then set $n=n+1$ and repeat the iteration starting at step 2; otherwise end the iteration.
            6. +Compare $k_{\mathrm{eff}}^{(n)}$ with $k_{\mathrm{eff}}^{(n-1)}$. If the change is greater than a prescribed tolerance, then set $n=n+1$ and repeat the iteration starting at step 2; otherwise end the iteration.
            -

            Note that in this scheme, we do not solve group fluxes exactly in each power iteration, but rather consider the previously computed $\phi_{g'}^{(n)}$ only for down-scattering events $g'<g$. Up-scattering is only treated by using old iterates $\phi_{g'}^{(n-1)}$, in essence assuming that the scattering operator is triangular. This is physically motivated since up-scattering does not play too important a role in neutron scattering. In addition, practice shows that the inverse power iteration is stable even using this simplification.

            +

            Note that in this scheme, we do not solve group fluxes exactly in each power iteration, but rather consider the previously computed $\phi_{g'}^{(n)}$ only for down-scattering events $g'<g$. Up-scattering is only treated by using old iterates $\phi_{g'}^{(n-1)}$, in essence assuming that the scattering operator is triangular. This is physically motivated since up-scattering does not play too important a role in neutron scattering. In addition, practice shows that the inverse power iteration is stable even using this simplification.

            Note also that one can use lots of extrapolation techniques to accelerate the power iteration laid out above. However, none of these are implemented in this example.

            Meshes and mesh refinement

            -

            One may wonder whether it is appropriate to solve for the solutions of the individual energy group equations on the same meshes. The question boils down to this: will $\phi_g$ and $\phi_{g'}$ have similar smoothness properties? If this is the case, then it is appropriate to use the same mesh for the two; a typical application could be chemical combustion, where typically the concentrations of all or most chemical species change rapidly within the flame front. As it turns out, and as will be apparent by looking at the graphs shown in the results section of this tutorial program, this isn't the case here, however: since the diffusion coefficient is different for different energy groups, fast neutrons (in bins with a small group number $g$) have a very smooth flux function, whereas slow neutrons (in bins with a large group number) are much more affected by the local material properties and have a correspondingly rough solution if the coefficients are rough, as in the case we compute here. Consequently, we will want to use different meshes to compute each energy group.

            -

            This has two implications that we will have to consider: First, we need to find a way to refine the meshes individually. Second, assembling the source terms for the inverse power iteration, where we have to integrate solution $\phi_{g'}^{(n)}$ defined on mesh $g'$ against the shape functions defined on mesh $g$, becomes a much more complicated task.

            +

            One may wonder whether it is appropriate to solve for the solutions of the individual energy group equations on the same meshes. The question boils down to this: will $\phi_g$ and $\phi_{g'}$ have similar smoothness properties? If this is the case, then it is appropriate to use the same mesh for the two; a typical application could be chemical combustion, where typically the concentrations of all or most chemical species change rapidly within the flame front. As it turns out, and as will be apparent by looking at the graphs shown in the results section of this tutorial program, this isn't the case here, however: since the diffusion coefficient is different for different energy groups, fast neutrons (in bins with a small group number $g$) have a very smooth flux function, whereas slow neutrons (in bins with a large group number) are much more affected by the local material properties and have a correspondingly rough solution if the coefficients are rough, as in the case we compute here. Consequently, we will want to use different meshes to compute each energy group.

            +

            This has two implications that we will have to consider: First, we need to find a way to refine the meshes individually. Second, assembling the source terms for the inverse power iteration, where we have to integrate solution $\phi_{g'}^{(n)}$ defined on mesh $g'$ against the shape functions defined on mesh $g$, becomes a much more complicated task.

            Mesh refinement

            We use the usual paradigm: solve on a given mesh, then evaluate an error indicator for each cell of each mesh we have. Because it is so convenient, we again use the a posteriori error estimator by Kelly, Gago, Zienkiewicz and Babuska which approximates the error per cell by integrating the jump of the gradient of the solution along the faces of each cell. Using this, we obtain indicators

            -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \eta_{g,K}, \qquad g=1,2,\ldots,G,\qquad K\in{\cal T}_g,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3776.png"/>

            -

            where ${\cal T}_g$ is the triangulation used in the solution of $\phi_g$. The question is what to do with this. For one, it is clear that refining only those cells with the highest error indicators might lead to bad results. To understand this, it is important to realize that $\eta_{g,K}$ scales with the second derivative of $\phi_g$. In other words, if we have two energy groups $g=1,2$ whose solutions are equally smooth but where one is larger by a factor of 10,000, for example, then only the cells of that mesh will be refined, whereas the mesh for the solution of small magnitude will remain coarse. This is probably not what one wants, since we can consider both components of the solution equally important.

            -

            In essence, we would therefore have to scale $\eta_{g,K}$ by an importance factor $z_g$ that says how important it is to resolve $\phi_g$ to any given accuracy. Such importance factors can be computed using duality techniques (see, for example, the step-14 tutorial program, and the reference to the book by Bangerth and Rannacher cited there). We won't go there, however, and simply assume that all energy groups are equally important, and will therefore normalize the error indicators $\eta_{g,K}$ for group $g$ by the maximum of the solution $\phi_g$. We then refine the cells whose errors satisfy

            -\begin{eqnarray*}
+<p> where <picture><source srcset=${\cal T}_g$ is the triangulation used in the solution of $\phi_g$. The question is what to do with this. For one, it is clear that refining only those cells with the highest error indicators might lead to bad results. To understand this, it is important to realize that $\eta_{g,K}$ scales with the second derivative of $\phi_g$. In other words, if we have two energy groups $g=1,2$ whose solutions are equally smooth but where one is larger by a factor of 10,000, for example, then only the cells of that mesh will be refined, whereas the mesh for the solution of small magnitude will remain coarse. This is probably not what one wants, since we can consider both components of the solution equally important.

            +

            In essence, we would therefore have to scale $\eta_{g,K}$ by an importance factor $z_g$ that says how important it is to resolve $\phi_g$ to any given accuracy. Such importance factors can be computed using duality techniques (see, for example, the step-14 tutorial program, and the reference to the book by Bangerth and Rannacher cited there). We won't go there, however, and simply assume that all energy groups are equally important, and will therefore normalize the error indicators $\eta_{g,K}$ for group $g$ by the maximum of the solution $\phi_g$. We then refine the cells whose errors satisfy

            +\begin{eqnarray*}
   \frac{\eta_{g,K}}{\|\phi_g\|_\infty}
   >
   \alpha_1
   \displaystyle{\max_{\begin{matrix}1\le g\le G \\ K\in {\cal T}_g\end{matrix}}
     \frac{\eta_{g,K}}{\|\phi_g\|_\infty}}
-\end{eqnarray*} +\end{eqnarray*}" src="form_3780.png"/>

            and coarsen the cells where

            -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \frac{\eta_{g,K}}{\|\phi_g\|_\infty}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html	2024-03-17 21:57:45.007243958 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html	2024-03-17 21:57:45.015244008 +0000
@@ -139,43 +139,43 @@
 <p><a class=

            Problem setting

            The original purpose of this program is to simulate the focusing properties of an ultrasound wave generated by a transducer lens with variable geometry. Recent applications in medical imaging use ultrasound waves not only for imaging purposes, but also to excite certain local effects in a material, like changes in optical properties, that can then be measured by other imaging techniques. A vital ingredient for these methods is the ability to focus the intensity of the ultrasound wave in a particular part of the material, ideally in a point, to be able to examine the properties of the material at that particular location.

            To derive a model for this problem, we think of ultrasound as a pressure wave governed by the wave equation:

            -\[
+<picture><source srcset=\[
         \frac{\partial^2 U}{\partial t^2}       -       c^2 \Delta U = 0
-\] +\]" src="form_3832.png"/>

            -

            where $c$ is the wave speed (that for simplicity we assume to be constant), $U
-= U(x,t),\;x \in \Omega,\;t\in\mathrm{R}$. The boundary $\Gamma=\partial\Omega$ is divided into two parts $\Gamma_1$ and $\Gamma_2=\Gamma\setminus\Gamma_1$, with $\Gamma_1$ representing the transducer lens and $\Gamma_2$ an absorbing boundary (that is, we want to choose boundary conditions on $\Gamma_2$ in such a way that they imitate a larger domain). On $\Gamma_1$, the transducer generates a wave of constant frequency ${\omega}>0$ and constant amplitude (that we chose to be 1 here):

            -\[
+<p> where <picture><source srcset=$c$ is the wave speed (that for simplicity we assume to be constant), $U
+= U(x,t),\;x \in \Omega,\;t\in\mathrm{R}$. The boundary $\Gamma=\partial\Omega$ is divided into two parts $\Gamma_1$ and $\Gamma_2=\Gamma\setminus\Gamma_1$, with $\Gamma_1$ representing the transducer lens and $\Gamma_2$ an absorbing boundary (that is, we want to choose boundary conditions on $\Gamma_2$ in such a way that they imitate a larger domain). On $\Gamma_1$, the transducer generates a wave of constant frequency ${\omega}>0$ and constant amplitude (that we chose to be 1 here):

            +\[
 U(x,t) = \cos{\omega t}, \qquad x\in \Gamma_1
-\] +\]" src="form_3838.png"/>

            -

            If there are no other (interior or boundary) sources, and since the only source has frequency $\omega$, then the solution admits a separation of variables of the form $U(x,t) = \textrm{Re}\left(u(x)\,e^{i\omega
-t})\right)$. The complex-valued function $u(x)$ describes the spatial dependency of amplitude and phase (relative to the source) of the waves of frequency ${\omega}$, with the amplitude being the quantity that we are interested in. By plugging this form of the solution into the wave equation, we see that for $u$ we have

            -\begin{eqnarray*}
+<p>If there are no other (interior or boundary) sources, and since the only source has frequency <picture><source srcset=$\omega$, then the solution admits a separation of variables of the form $U(x,t) = \textrm{Re}\left(u(x)\,e^{i\omega
+t})\right)$. The complex-valued function $u(x)$ describes the spatial dependency of amplitude and phase (relative to the source) of the waves of frequency ${\omega}$, with the amplitude being the quantity that we are interested in. By plugging this form of the solution into the wave equation, we see that for $u$ we have

            +\begin{eqnarray*}
 -\omega^2 u(x) - c^2\Delta u(x) &=& 0, \qquad x\in\Omega,\\
 u(x) &=& 1,  \qquad x\in\Gamma_1.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3843.png"/>

            -

            For finding suitable conditions on $\Gamma_2$ that model an absorbing boundary, consider a wave of the form $V(x,t)=e^{i(k\cdot x -\omega t)}$ with frequency ${\omega}$ traveling in direction $k\in {\mathrm{R}^2}$. In order for $V$ to solve the wave equation, $|k|={\frac{\omega}{c}}$ must hold. Suppose that this wave hits the boundary in $x_0\in\Gamma_2$ at a right angle, i.e. $n=\frac{k}{|k|}$ with $n$ denoting the outer unit normal of $\Omega$ in $x_0$. Then at $x_0$, this wave satisfies the equation

            -\[
+<p>For finding suitable conditions on <picture><source srcset=$\Gamma_2$ that model an absorbing boundary, consider a wave of the form $V(x,t)=e^{i(k\cdot x -\omega t)}$ with frequency ${\omega}$ traveling in direction $k\in {\mathrm{R}^2}$. In order for $V$ to solve the wave equation, $|k|={\frac{\omega}{c}}$ must hold. Suppose that this wave hits the boundary in $x_0\in\Gamma_2$ at a right angle, i.e. $n=\frac{k}{|k|}$ with $n$ denoting the outer unit normal of $\Omega$ in $x_0$. Then at $x_0$, this wave satisfies the equation

            +\[
 c (n\cdot\nabla V) + \frac{\partial V}{\partial t} = (i\, c\, |k| - i\, \omega) V = 0.
-\] +\]" src="form_3849.png"/>

            Hence, by enforcing the boundary condition

            -\[
+<picture><source srcset=\[
 c (n\cdot\nabla U) + \frac{\partial U}{\partial t} = 0, \qquad x\in\Gamma_2,
-\] +\]" src="form_3850.png"/>

            -

            waves that hit the boundary $\Gamma_2$ at a right angle will be perfectly absorbed. On the other hand, those parts of the wave field that do not hit a boundary at a right angle do not satisfy this condition and enforcing it as a boundary condition will yield partial reflections, i.e. only parts of the wave will pass through the boundary as if it wasn't here whereas the remaining fraction of the wave will be reflected back into the domain.

            -

            If we are willing to accept this as a sufficient approximation to an absorbing boundary we finally arrive at the following problem for $u$:

            -\begin{eqnarray*}
+<p> waves that hit the boundary <picture><source srcset=$\Gamma_2$ at a right angle will be perfectly absorbed. On the other hand, those parts of the wave field that do not hit a boundary at a right angle do not satisfy this condition and enforcing it as a boundary condition will yield partial reflections, i.e. only parts of the wave will pass through the boundary as if it wasn't here whereas the remaining fraction of the wave will be reflected back into the domain.

            +

            If we are willing to accept this as a sufficient approximation to an absorbing boundary we finally arrive at the following problem for $u$:

            +\begin{eqnarray*}
 -\omega^2 u - c^2\Delta u &=& 0, \qquad x\in\Omega,\\
 c (n\cdot\nabla u) + i\,\omega\,u &=&0, \qquad x\in\Gamma_2,\\
 u &=& 1,  \qquad x\in\Gamma_1.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3851.png"/>

            -

            This is a Helmholtz equation (similar to the one in step-7, but this time with ''the bad sign'') with Dirichlet data on $\Gamma_1$ and mixed boundary conditions on $\Gamma_2$. Because of the condition on $\Gamma_2$, we cannot just treat the equations for real and imaginary parts of $u$ separately. What we can do however is to view the PDE for $u$ as a system of two PDEs for the real and imaginary parts of $u$, with the boundary condition on $\Gamma_2$ representing the coupling terms between the two components of the system. This works along the following lines: Let $v=\textrm{Re}\;u,\; w=\textrm{Im}\;u$, then in terms of $v$ and $w$ we have the following system:

            -\begin{eqnarray*}
+<p> This is a Helmholtz equation (similar to the one in <a class=step-7, but this time with ''the bad sign'') with Dirichlet data on $\Gamma_1$ and mixed boundary conditions on $\Gamma_2$. Because of the condition on $\Gamma_2$, we cannot just treat the equations for real and imaginary parts of $u$ separately. What we can do however is to view the PDE for $u$ as a system of two PDEs for the real and imaginary parts of $u$, with the boundary condition on $\Gamma_2$ representing the coupling terms between the two components of the system. This works along the following lines: Let $v=\textrm{Re}\;u,\; w=\textrm{Im}\;u$, then in terms of $v$ and $w$ we have the following system:

            +\begin{eqnarray*}
   \left.\begin{array}{ccc}
     -\omega^2 v - c^2\Delta v &=& 0 \quad\\
     -\omega^2 w - c^2\Delta w &=& 0 \quad
@@ -190,26 +190,26 @@
     v &=& 1 \quad\\
     w &=& 0 \quad
   \end{array}\right\} &\;& x\in\Gamma_1.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3853.png"/>

            -

            For test functions $\phi,\psi$ with $\phi|_{\Gamma_1}=\psi|_{\Gamma_1}=0$, after the usual multiplication, integration over $\Omega$ and applying integration by parts, we get the weak formulation

            -\begin{eqnarray*}
+<p>For test functions <picture><source srcset=$\phi,\psi$ with $\phi|_{\Gamma_1}=\psi|_{\Gamma_1}=0$, after the usual multiplication, integration over $\Omega$ and applying integration by parts, we get the weak formulation

            +\begin{eqnarray*}
 -\omega^2 \langle \phi, v \rangle_{\mathrm{L}^2(\Omega)}
 + c^2 \langle \nabla \phi, \nabla v \rangle_{\mathrm{L}^2(\Omega)}
 - c \omega \langle \phi, w \rangle_{\mathrm{L}^2(\Gamma_2)} &=& 0, \\
 -\omega^2 \langle \psi, w \rangle_{\mathrm{L}^2(\Omega)}
 + c^2 \langle \nabla \psi, \nabla w \rangle_{\mathrm{L}^2(\Omega)}
 + c \omega \langle \psi, v \rangle_{\mathrm{L}^2(\Gamma_2)} &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3856.png"/>

            -

            We choose finite element spaces $V_h$ and $W_h$ with bases $\{\phi_j\}_{j=1}^n,
-\{\psi_j\}_{j=1}^n$ and look for approximate solutions

            -\[
+<p>We choose finite element spaces <picture><source srcset=$V_h$ and $W_h$ with bases $\{\phi_j\}_{j=1}^n,
+\{\psi_j\}_{j=1}^n$ and look for approximate solutions

            +\[
 v_h = \sum_{j=1}^n \alpha_j \phi_j, \;\; w_h = \sum_{j=1}^n \beta_j \psi_j.
-\] +\]" src="form_3859.png"/>

            Plugging into the variational form yields the equation system

            -\[
+<picture><source srcset=\[
 \renewcommand{\arraystretch}{2.0}
 \left.\begin{array}{ccc}
 \sum_{j=1}^n
@@ -232,10 +232,10 @@
 \right)\alpha_j
 &=& 0
 \end{array}\right\}\;\;\forall\; i =1,\ldots,n.
-\] +\]" src="form_3860.png"/>

            In matrix notation:

            -\[
+<picture><source srcset=\[
 \renewcommand{\arraystretch}{2.0}
 \left(
 \begin{array}{cc}
@@ -258,12 +258,12 @@
 0 \\ 0
 \end{array}
 \right)
-\] +\]" src="form_3861.png"/>

            -

            (One should not be fooled by the right hand side being zero here, that is because we haven't included the Dirichlet boundary data yet.) Because of the alternating sign in the off-diagonal blocks, we can already see that this system is non-symmetric, in fact it is even indefinite. Of course, there is no necessity to choose the spaces $V_h$ and $W_h$ to be the same. However, we expect real and imaginary part of the solution to have similar properties and will therefore indeed take $V_h=W_h$ in the implementation, and also use the same basis functions $\phi_i = \psi_i$ for both spaces. The reason for the notation using different symbols is just that it allows us to distinguish between shape functions for $v$ and $w$, as this distinction plays an important role in the implementation.

            +

            (One should not be fooled by the right hand side being zero here, that is because we haven't included the Dirichlet boundary data yet.) Because of the alternating sign in the off-diagonal blocks, we can already see that this system is non-symmetric, in fact it is even indefinite. Of course, there is no necessity to choose the spaces $V_h$ and $W_h$ to be the same. However, we expect real and imaginary part of the solution to have similar properties and will therefore indeed take $V_h=W_h$ in the implementation, and also use the same basis functions $\phi_i = \psi_i$ for both spaces. The reason for the notation using different symbols is just that it allows us to distinguish between shape functions for $v$ and $w$, as this distinction plays an important role in the implementation.

            The test case

            -

            For the computations, we will consider wave propagation in the unit square, with ultrasound generated by a transducer lens that is shaped like a segment of the circle with center at $(0.5, d)$ and a radius slightly greater than $d$; this shape should lead to a focusing of the sound wave at the center of the circle. Varying $d$ changes the "focus" of the lens and affects the spatial distribution of the intensity of $u$, where our main concern is how well $|u|=\sqrt{v^2+w^2}$ is focused.

            -

            In the program below, we will implement the complex-valued Helmholtz equations using the formulation with split real and imaginary parts. We will also discuss how to generate a domain that looks like a square with a slight bulge simulating the transducer (in the UltrasoundProblem<dim>::make_grid() function), and how to generate graphical output that not only contains the solution components $v$ and $w$, but also the magnitude $\sqrt{v^2+w^2}$ directly in the output file (in UltrasoundProblem<dim>::output_results()). Finally, we use the ParameterHandler class to easily read parameters like the focal distance $d$, wave speed $c$, frequency $\omega$, and a number of other parameters from an input file at run-time, rather than fixing those parameters in the source code where we would have to re-compile every time we want to change parameters.

            +

            For the computations, we will consider wave propagation in the unit square, with ultrasound generated by a transducer lens that is shaped like a segment of the circle with center at $(0.5, d)$ and a radius slightly greater than $d$; this shape should lead to a focusing of the sound wave at the center of the circle. Varying $d$ changes the "focus" of the lens and affects the spatial distribution of the intensity of $u$, where our main concern is how well $|u|=\sqrt{v^2+w^2}$ is focused.

            +

            In the program below, we will implement the complex-valued Helmholtz equations using the formulation with split real and imaginary parts. We will also discuss how to generate a domain that looks like a square with a slight bulge simulating the transducer (in the UltrasoundProblem<dim>::make_grid() function), and how to generate graphical output that not only contains the solution components $v$ and $w$, but also the magnitude $\sqrt{v^2+w^2}$ directly in the output file (in UltrasoundProblem<dim>::output_results()). Finally, we use the ParameterHandler class to easily read parameters like the focal distance $d$, wave speed $c$, frequency $\omega$, and a number of other parameters from an input file at run-time, rather than fixing those parameters in the source code where we would have to re-compile every time we want to change parameters.

            The commented program

            Include files

            The following header files have all been discussed before:

            @@ -310,7 +310,7 @@

        The DirichletBoundaryValues class

        First we define a class for the function representing the Dirichlet boundary values. This has been done many times before and therefore does not need much explanation.

        -

        Since there are two values $v$ and $w$ that need to be prescribed at the boundary, we have to tell the base class that this is a vector-valued function with two components, and the vector_value function and its cousin vector_value_list must return vectors with two entries. In our case the function is very simple, it just returns 1 for the real part $v$ and 0 for the imaginary part $w$ regardless of the point where it is evaluated.

        +

        Since there are two values $v$ and $w$ that need to be prescribed at the boundary, we have to tell the base class that this is a vector-valued function with two components, and the vector_value function and its cousin vector_value_list must return vectors with two entries. In our case the function is very simple, it just returns 1 for the real part $v$ and 0 for the imaginary part $w$ regardless of the point where it is evaluated.

          template <int dim>
          class DirichletBoundaryValues : public Function<dim>
          {
        @@ -369,7 +369,7 @@

        The declare_parameters function declares all the parameters that our ParameterHandler object will be able to read from input files, along with their types, range conditions and the subsections they appear in. We will wrap all the entries that go into a section in a pair of braces to force the editor to indent them by one level, making it simpler to read which entries together form a section:

          void ParameterReader::declare_parameters()
          {
        -

        Parameters for mesh and geometry include the number of global refinement steps that are applied to the initial coarse mesh and the focal distance $d$ of the transducer lens. For the number of refinement steps, we allow integer values in the range $[0,\infty)$, where the omitted second argument to the Patterns::Integer object denotes the half-open interval. For the focal distance any number greater than zero is accepted:

        +

        Parameters for mesh and geometry include the number of global refinement steps that are applied to the initial coarse mesh and the focal distance $d$ of the transducer lens. For the number of refinement steps, we allow integer values in the range $[0,\infty)$, where the omitted second argument to the Patterns::Integer object denotes the half-open interval. For the focal distance any number greater than zero is accepted:

          prm.enter_subsection("Mesh & geometry parameters");
          {
          prm.declare_entry("Number of refinements",
        @@ -388,7 +388,7 @@
         
        -

        The next subsection is devoted to the physical parameters appearing in the equation, which are the frequency $\omega$ and wave speed $c$. Again, both need to lie in the half-open interval $[0,\infty)$ represented by calling the Patterns::Double class with only the left end-point as argument:

        +

        The next subsection is devoted to the physical parameters appearing in the equation, which are the frequency $\omega$ and wave speed $c$. Again, both need to lie in the half-open interval $[0,\infty)$ represented by calling the Patterns::Double class with only the left end-point as argument:

          prm.enter_subsection("Physical constants");
          {
          prm.declare_entry("c", "1.5e5", Patterns::Double(0), "Wave speed");
        @@ -426,8 +426,8 @@
         
         

        The ComputeIntensity class

        -

        As mentioned in the introduction, the quantity that we are really after is the spatial distribution of the intensity of the ultrasound wave, which corresponds to $|u|=\sqrt{v^2+w^2}$. Now we could just be content with having $v$ and $w$ in our output, and use a suitable visualization or postprocessing tool to derive $|u|$ from the solution we computed. However, there is also a way to output data derived from the solution in deal.II, and we are going to make use of this mechanism here.

        -

        So far we have always used the DataOut::add_data_vector function to add vectors containing output data to a DataOut object. There is a special version of this function that in addition to the data vector has an additional argument of type DataPostprocessor. What happens when this function is used for output is that at each point where output data is to be generated, the DataPostprocessor::evaluate_scalar_field() or DataPostprocessor::evaluate_vector_field() function of the specified DataPostprocessor object is invoked to compute the output quantities from the values, the gradients and the second derivatives of the finite element function represented by the data vector (in the case of face related data, normal vectors are available as well). Hence, this allows us to output any quantity that can locally be derived from the values of the solution and its derivatives. Of course, the ultrasound intensity $|u|$ is such a quantity and its computation doesn't even involve any derivatives of $v$ or $w$.

        +

        As mentioned in the introduction, the quantity that we are really after is the spatial distribution of the intensity of the ultrasound wave, which corresponds to $|u|=\sqrt{v^2+w^2}$. Now we could just be content with having $v$ and $w$ in our output, and use a suitable visualization or postprocessing tool to derive $|u|$ from the solution we computed. However, there is also a way to output data derived from the solution in deal.II, and we are going to make use of this mechanism here.

        +

        So far we have always used the DataOut::add_data_vector function to add vectors containing output data to a DataOut object. There is a special version of this function that in addition to the data vector has an additional argument of type DataPostprocessor. What happens when this function is used for output is that at each point where output data is to be generated, the DataPostprocessor::evaluate_scalar_field() or DataPostprocessor::evaluate_vector_field() function of the specified DataPostprocessor object is invoked to compute the output quantities from the values, the gradients and the second derivatives of the finite element function represented by the data vector (in the case of face related data, normal vectors are available as well). Hence, this allows us to output any quantity that can locally be derived from the values of the solution and its derivatives. Of course, the ultrasound intensity $|u|$ is such a quantity and its computation doesn't even involve any derivatives of $v$ or $w$.

        In practice, the DataPostprocessor class only provides an interface to this functionality, and we need to derive our own class from it in order to implement the functions specified by the interface. In the most general case one has to implement several member functions but if the output quantity is a single scalar then some of this boilerplate code can be handled by a more specialized class, DataPostprocessorScalar and we can derive from that one instead. This is what the ComputeIntensity class does:

          template <int dim>
          class ComputeIntensity : public DataPostprocessorScalar<dim>
        @@ -443,8 +443,8 @@
        virtual void evaluate_vector_field(const DataPostprocessorInputs::Vector< dim > &input_data, std::vector< Vector< double > > &computed_quantities) const
        -

        In the constructor, we need to call the constructor of the base class with two arguments. The first denotes the name by which the single scalar quantity computed by this class should be represented in output files. In our case, the postprocessor has $|u|$ as output, so we use "Intensity".

        -

        The second argument is a set of flags that indicate which data is needed by the postprocessor in order to compute the output quantities. This can be any subset of update_values, update_gradients and update_hessians (and, in the case of face data, also update_normal_vectors), which are documented in UpdateFlags. Of course, computation of the derivatives requires additional resources, so only the flags for data that are really needed should be given here, just as we do when we use FEValues objects. In our case, only the function values of $v$ and $w$ are needed to compute $|u|$, so we're good with the update_values flag.

        +

        In the constructor, we need to call the constructor of the base class with two arguments. The first denotes the name by which the single scalar quantity computed by this class should be represented in output files. In our case, the postprocessor has $|u|$ as output, so we use "Intensity".

        +

        The second argument is a set of flags that indicate which data is needed by the postprocessor in order to compute the output quantities. This can be any subset of update_values, update_gradients and update_hessians (and, in the case of face data, also update_normal_vectors), which are documented in UpdateFlags. Of course, computation of the derivatives requires additional resources, so only the flags for data that are really needed should be given here, just as we do when we use FEValues objects. In our case, only the function values of $v$ and $w$ are needed to compute $|u|$, so we're good with the update_values flag.

          template <int dim>
          ComputeIntensity<dim>::ComputeIntensity()
          : DataPostprocessorScalar<dim>("Intensity", update_values)
        @@ -452,7 +452,7 @@
         
         
        @ update_values
        Shape function values.
        -

        The actual postprocessing happens in the following function. Its input is an object that stores values of the function (which is here vector-valued) representing the data vector given to DataOut::add_data_vector, evaluated at all evaluation points where we generate output, and some tensor objects representing derivatives (that we don't use here since $|u|$ is computed from just $v$ and $w$). The derived quantities are returned in the computed_quantities vector. Remember that this function may only use data for which the respective update flag is specified by get_needed_update_flags. For example, we may not use the derivatives here, since our implementation of get_needed_update_flags requests that only function values are provided.

        +

        The actual postprocessing happens in the following function. Its input is an object that stores values of the function (which is here vector-valued) representing the data vector given to DataOut::add_data_vector, evaluated at all evaluation points where we generate output, and some tensor objects representing derivatives (that we don't use here since $|u|$ is computed from just $v$ and $w$). The derived quantities are returned in the computed_quantities vector. Remember that this function may only use data for which the respective update flag is specified by get_needed_update_flags. For example, we may not use the derivatives here, since our implementation of get_needed_update_flags requests that only function values are provided.

          template <int dim>
          void ComputeIntensity<dim>::evaluate_vector_field(
        @@ -460,7 +460,7 @@
          {
          AssertDimension(computed_quantities.size(), inputs.solution_values.size());
         
        -

        The computation itself is straightforward: We iterate over each entry in the output vector and compute $|u|$ from the corresponding values of $v$ and $w$. We do this by creating a complex number $u$ and then calling std::abs() on the result. (One may be tempted to call std::norm(), but in a historical quirk, the C++ committee decided that std::norm() should return the square of the absolute value – thereby not satisfying the properties mathematicians require of something called a "norm".)

        +

        The computation itself is straightforward: We iterate over each entry in the output vector and compute $|u|$ from the corresponding values of $v$ and $w$. We do this by creating a complex number $u$ and then calling std::abs() on the result. (One may be tempted to call std::norm(), but in a historical quirk, the C++ committee decided that std::norm() should return the square of the absolute value – thereby not satisfying the properties mathematicians require of something called a "norm".)

          for (unsigned int p = 0; p < computed_quantities.size(); ++p)
          {
          AssertDimension(computed_quantities[p].size(), 1);
        /usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html 2024-03-17 21:57:45.079244403 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html 2024-03-17 21:57:45.079244403 +0000 @@ -137,99 +137,99 @@
        Note
        The material presented here is also discussed in video lecture 10. (All video lectures are also available here.)

        The basic set up of finite element methods

        This is the first example where we actually use finite elements to compute something. We will solve a simple version of Poisson's equation with zero boundary values, but a nonzero right hand side:

        -\begin{align*}
+<picture><source srcset=\begin{align*}
   -\Delta u &= f \qquad\qquad & \text{in}\ \Omega,
   \\
   u &= 0 \qquad\qquad & \text{on}\ \partial\Omega.
-\end{align*} +\end{align*}" src="form_3877.png"/>

        -

        We will solve this equation on the square, $\Omega=[-1,1]^2$, for which you've already learned how to generate a mesh in step-1 and step-2. In this program, we will also only consider the particular case $f(\mathbf x)=1$ and come back to how to implement the more general case in the next tutorial program, step-4.

        -

        If you've learned about the basics of the finite element method, you will remember the steps we need to take to approximate the solution $u$ by a finite dimensional approximation. Specifically, we first need to derive the weak form of the equation above, which we obtain by multiplying the equation by a test function $\varphi$ from the left (we will come back to the reason for multiplying from the left and not from the right below) and integrating over the domain $\Omega$:

        -\begin{align*}
+<p> We will solve this equation on the square, <picture><source srcset=$\Omega=[-1,1]^2$, for which you've already learned how to generate a mesh in step-1 and step-2. In this program, we will also only consider the particular case $f(\mathbf x)=1$ and come back to how to implement the more general case in the next tutorial program, step-4.

        +

        If you've learned about the basics of the finite element method, you will remember the steps we need to take to approximate the solution $u$ by a finite dimensional approximation. Specifically, we first need to derive the weak form of the equation above, which we obtain by multiplying the equation by a test function $\varphi$ from the left (we will come back to the reason for multiplying from the left and not from the right below) and integrating over the domain $\Omega$:

        +\begin{align*}
   -\int_\Omega \varphi \Delta u = \int_\Omega \varphi f.
-\end{align*} +\end{align*}" src="form_3880.png"/>

        This can be integrated by parts:

        -\begin{align*}
+<picture><source srcset=\begin{align*}
   \int_\Omega \nabla\varphi \cdot \nabla u
   -
   \int_{\partial\Omega} \varphi \mathbf{n}\cdot \nabla u
    = \int_\Omega \varphi f.
-\end{align*} +\end{align*}" src="form_3881.png"/>

        -

        The test function $\varphi$ has to satisfy the same kind of boundary conditions (in mathematical terms: it needs to come from the tangent space of the set in which we seek the solution), so on the boundary $\varphi=0$ and consequently the weak form we are looking for reads

        -\begin{align*}
+<p> The test function <picture><source srcset=$\varphi$ has to satisfy the same kind of boundary conditions (in mathematical terms: it needs to come from the tangent space of the set in which we seek the solution), so on the boundary $\varphi=0$ and consequently the weak form we are looking for reads

        +\begin{align*}
   (\nabla\varphi, \nabla u)
    = (\varphi, f),
-\end{align*} +\end{align*}" src="form_3883.png"/>

        -

        where we have used the common notation $(a,b)=\int_\Omega a\; b$. The problem then asks for a function $u$ for which this statement is true for all test functions $\varphi$ from the appropriate space (which here is the space $H^1$).

        -

        Of course we can't find such a function on a computer in the general case, and instead we seek an approximation $u_h(\mathbf x)=\sum_j U_j \varphi_j(\mathbf
-x)$, where the $U_j$ are unknown expansion coefficients we need to determine (the "degrees of freedom" of this problem), and $\varphi_i(\mathbf x)$ are the finite element shape functions we will use. To define these shape functions, we need the following:

        +

        where we have used the common notation $(a,b)=\int_\Omega a\; b$. The problem then asks for a function $u$ for which this statement is true for all test functions $\varphi$ from the appropriate space (which here is the space $H^1$).

        +

        Of course we can't find such a function on a computer in the general case, and instead we seek an approximation $u_h(\mathbf x)=\sum_j U_j \varphi_j(\mathbf
+x)$, where the $U_j$ are unknown expansion coefficients we need to determine (the "degrees of freedom" of this problem), and $\varphi_i(\mathbf x)$ are the finite element shape functions we will use. To define these shape functions, we need the following:

        • A mesh on which to define shape functions. You have already seen how to generate and manipulate the objects that describe meshes in step-1 and step-2.
        • -
        • A finite element that describes the shape functions we want to use on the reference cell (which in deal.II is always the unit interval $[0,1]$, the unit square $[0,1]^2$ or the unit cube $[0,1]^3$, depending on which space dimension you work in). In step-2, we had already used an object of type FE_Q<2>, which denotes the usual Lagrange elements that define shape functions by interpolation on support points. The simplest one is FE_Q<2>(1), which uses polynomial degree 1. In 2d, these are often referred to as bilinear, since they are linear in each of the two coordinates of the reference cell. (In 1d, they would be linear and in 3d tri-linear; however, in the deal.II documentation, we will frequently not make this distinction and simply always call these functions "linear".)
        • +
        • A finite element that describes the shape functions we want to use on the reference cell (which in deal.II is always the unit interval $[0,1]$, the unit square $[0,1]^2$ or the unit cube $[0,1]^3$, depending on which space dimension you work in). In step-2, we had already used an object of type FE_Q<2>, which denotes the usual Lagrange elements that define shape functions by interpolation on support points. The simplest one is FE_Q<2>(1), which uses polynomial degree 1. In 2d, these are often referred to as bilinear, since they are linear in each of the two coordinates of the reference cell. (In 1d, they would be linear and in 3d tri-linear; however, in the deal.II documentation, we will frequently not make this distinction and simply always call these functions "linear".)
        • A DoFHandler object that enumerates all the degrees of freedom on the mesh, taking the reference cell description the finite element object provides as the basis. You've also already seen how to do this in step-2.
        • A mapping that tells how the shape functions on the real cell are obtained from the shape functions defined by the finite element class on the reference cell. By default, unless you explicitly say otherwise, deal.II will use a (bi-, tri-)linear mapping for this, so in most cases you don't have to worry about this step.
        -

        Through these steps, we now have a set of functions $\varphi_i$, and we can define the weak form of the discrete problem: Find a function $u_h$, i.e., find the expansion coefficients $U_j$ mentioned above, so that

        \begin{align*}
   (\nabla\varphi_i, \nabla u_h)
    = (\varphi_i, f),
    \qquad\qquad
    i=0\ldots N-1.
\end{align*}

        -

        Note that we here follow the convention that everything is counted starting at zero, as common in C and C++. This equation can be rewritten as a linear system if you insert the representation $u_h(\mathbf x)=\sum_j U_j \varphi_j(\mathbf x)$ and then observe that

        \begin{align*}
   (\nabla\varphi_i, \nabla u_h)
   &= \left(\nabla\varphi_i, \nabla \Bigl[\sum_j U_j \varphi_j\Bigr]\right)
 \\
   &= \sum_j \left(\nabla\varphi_i, \nabla \left[U_j \varphi_j\right]\right)
 \\
   &= \sum_j \left(\nabla\varphi_i, \nabla \varphi_j \right) U_j.
\end{align*}

        With this, the problem reads: Find a vector $U$ so that

        \begin{align*}
   A U = F,
\end{align*}

        -

        where the matrix $A$ and the right hand side $F$ are defined as

        \begin{align*}
   A_{ij} &= (\nabla\varphi_i, \nabla \varphi_j),
   \\
   F_i &= (\varphi_i, f).
\end{align*}

        Should we multiply by a test function from the left or from the right?

        Before we move on with describing how these quantities can be computed, note that if we had multiplied the original equation from the right by a test function rather than from the left, then we would have obtained a linear system of the form

        \begin{align*}
   U^T A = F^T
\end{align*}

        -

        with a row vector $F^T$. By transposing this system, this is of course equivalent to solving

        \begin{align*}
   A^T U = F
\end{align*}

        -

        which here is the same as above since $A=A^T$. But in general is not, and in order to avoid any sort of confusion, experience has shown that simply getting into the habit of multiplying the equation from the left rather than from the right (as is often done in the mathematical literature) avoids a common class of errors as the matrix is automatically correct and does not need to be transposed when comparing theory and implementation. See step-9 for the first example in this tutorial where we have a non-symmetric bilinear form for which it makes a difference whether we multiply from the right or from the left.

        +

        which here is the same as above since $A=A^T$. But in general is not, and in order to avoid any sort of confusion, experience has shown that simply getting into the habit of multiplying the equation from the left rather than from the right (as is often done in the mathematical literature) avoids a common class of errors as the matrix is automatically correct and does not need to be transposed when comparing theory and implementation. See step-9 for the first example in this tutorial where we have a non-symmetric bilinear form for which it makes a difference whether we multiply from the right or from the left.

        Assembling the matrix and right hand side vector

        -

        Now we know what we need (namely: objects that hold the matrix and vectors, as well as ways to compute $A_{ij},F_i$), and we can look at what it takes to make that happen:

        +

        Now we know what we need (namely: objects that hold the matrix and vectors, as well as ways to compute $A_{ij},F_i$), and we can look at what it takes to make that happen:

          -
        • The object for $A$ is of type SparseMatrix while those for $U$ and $F$ are of type Vector. We will see in the program below what classes are used to solve linear systems.
        • -
        • We need a way to form the integrals. In the finite element method, this is most commonly done using quadrature, i.e. the integrals are replaced by a weighted sum over a set of quadrature points on each cell. That is, we first split the integral over $\Omega$ into integrals over all cells,

          \begin{align*}
     A_{ij} &= (\nabla\varphi_i, \nabla \varphi_j)
     = \sum_{K \in {\mathbb T}} \int_K \nabla\varphi_i \cdot \nabla \varphi_j,
     \\
     F_i &= (\varphi_i, f)
     = \sum_{K \in {\mathbb T}} \int_K \varphi_i f,
  \end{align*}

          and then approximate each cell's contribution by quadrature:

          \begin{align*}
     A^K_{ij} &=
     \int_K \nabla\varphi_i \cdot \nabla \varphi_j
     \approx
     \sum_q \nabla\varphi_i(\mathbf x^K_q) \cdot \nabla \varphi_j(\mathbf x^K_q) w^K_q,
     \\
     F^K_i &=
     \int_K \varphi_i f
     \approx
     \sum_q \varphi_i(\mathbf x^K_q) f(\mathbf x^K_q) w^K_q,
  \end{align*}

          - where $\mathbb{T} \approx \Omega$ is a Triangulation approximating the domain, $\mathbf x^K_q$ is the $q$th quadrature point on cell $K$, and $w^K_q$ the $q$th quadrature weight. There are different parts to what is needed in doing this, and we will discuss them in turn next.
        • -
        • First, we need a way to describe the location $\mathbf x_q^K$ of quadrature points and their weights $w^K_q$. They are usually mapped from the reference cell in the same way as shape functions, i.e., implicitly using the MappingQ1 class or, if you explicitly say so, through one of the other classes derived from Mapping. The locations and weights on the reference cell are described by objects derived from the Quadrature base class. Typically, one chooses a quadrature formula (i.e. a set of points and weights) so that the quadrature exactly equals the integral in the matrix; this can be achieved because all factors in the integral are polynomial, and is done by Gaussian quadrature formulas, implemented in the QGauss class.
        • -
        • We then need something that can help us evaluate $\varphi_i(\mathbf x^K_q)$ on cell $K$. This is what the FEValues class does: it takes a finite element objects to describe $\varphi$ on the reference cell, a quadrature object to describe the quadrature points and weights, and a mapping object (or implicitly takes the MappingQ1 class) and provides values and derivatives of the shape functions on the real cell $K$ as well as all sorts of other information needed for integration, at the quadrature points located on $K$.
        • + where $\mathbb{T} \approx \Omega$ is a Triangulation approximating the domain, $\mathbf x^K_q$ is the $q$th quadrature point on cell $K$, and $w^K_q$ the $q$th quadrature weight. There are different parts to what is needed in doing this, and we will discuss them in turn next. +
        • First, we need a way to describe the location $\mathbf x_q^K$ of quadrature points and their weights $w^K_q$. They are usually mapped from the reference cell in the same way as shape functions, i.e., implicitly using the MappingQ1 class or, if you explicitly say so, through one of the other classes derived from Mapping. The locations and weights on the reference cell are described by objects derived from the Quadrature base class. Typically, one chooses a quadrature formula (i.e. a set of points and weights) so that the quadrature exactly equals the integral in the matrix; this can be achieved because all factors in the integral are polynomial, and is done by Gaussian quadrature formulas, implemented in the QGauss class.
        • +
        • We then need something that can help us evaluate $\varphi_i(\mathbf x^K_q)$ on cell $K$. This is what the FEValues class does: it takes a finite element objects to describe $\varphi$ on the reference cell, a quadrature object to describe the quadrature points and weights, and a mapping object (or implicitly takes the MappingQ1 class) and provides values and derivatives of the shape functions on the real cell $K$ as well as all sorts of other information needed for integration, at the quadrature points located on $K$.

        The process of computing the matrix and right hand side as a sum over all cells (and then a sum over quadrature points) is usually called assembling the linear system, or assembly for short, using the meaning of the word related to assembly line, meaning "the act of putting together a set of pieces, fragments, or elements".

        FEValues really is the central class in the assembly process. One way you can view it is as follows: The FiniteElement and derived classes describe shape functions, i.e., infinite dimensional objects: functions have values at every point. We need this for theoretical reasons because we want to perform our analysis with integrals over functions. However, for a computer, this is a very difficult concept, since they can in general only deal with a finite amount of information, and so we replace integrals by sums over quadrature points that we obtain by mapping (the Mapping object) using points defined on a reference cell (the Quadrature object) onto points on the real cell. In essence, we reduce the problem to one where we only need a finite amount of information, namely shape function values and derivatives, quadrature weights, normal vectors, etc, exclusively at a finite set of points. The FEValues class is the one that brings the three components together and provides this finite set of information on a particular cell $K$. You will see it in action when we assemble the linear system below.

        @@ -252,17 +252,17 @@

        The final piece of this introduction is to mention that after a linear system is obtained, it is solved using an iterative solver and then postprocessed: we create an output file using the DataOut class that can then be visualized using one of the common visualization programs.

        Note
        The preceding overview of all the important steps of any finite element implementation has its counterpart in deal.II: The library can naturally be grouped into a number of "modules" that cover the basic concepts just outlined. You can access these modules through the tab at the top of this page. An overview of the most fundamental groups of concepts is also available on the front page of the deal.II manual.

        Solving the linear system

        -

        For a finite element program, the linear system we end up with here is relatively small: The matrix has size $1089 \times 1089$, owing to the fact that the mesh we use is $32\times 32$ and so there are $33^2=1089$ vertices in the mesh. In many of the later tutorial programs, matrix sizes in the range of tens of thousands to hundreds of thousands will not be uncommon, and with codes such as ASPECT that build on deal.II, we regularly solve problems with more than a hundred million equations (albeit using parallel computers). In any case, even for the small system here, the matrix is much larger than what one typically encounters in an undergraduate or most graduate courses, and so the question arises how we can solve such linear systems.

        -

        The first method one typically learns for solving linear systems is Gaussian elimination. The problem with this method is that it requires a number of operations that is proportional to $N^3$, where $N$ is the number of equations or unknowns in the linear system – more specifically, the number of operations is $\frac 23 N^3$, give or take a few. With $N=1089$, this means that we would have to do around $861$ million operations. This is a number that is quite feasible and it would take modern processors less than 0.1 seconds to do this. But it is clear that this isn't going to scale: If we have twenty times as many equations in the linear system (that is, twenty times as many unknowns), then it would already take 1000-10,000 seconds or on the order of an hour. Make the linear system another ten times larger, and it is clear that we can not solve it any more on a single computer.

        +

        For a finite element program, the linear system we end up with here is relatively small: The matrix has size $1089 \times 1089$, owing to the fact that the mesh we use is $32\times 32$ and so there are $33^2=1089$ vertices in the mesh. In many of the later tutorial programs, matrix sizes in the range of tens of thousands to hundreds of thousands will not be uncommon, and with codes such as ASPECT that build on deal.II, we regularly solve problems with more than a hundred million equations (albeit using parallel computers). In any case, even for the small system here, the matrix is much larger than what one typically encounters in an undergraduate or most graduate courses, and so the question arises how we can solve such linear systems.

        +

        The first method one typically learns for solving linear systems is Gaussian elimination. The problem with this method is that it requires a number of operations that is proportional to $N^3$, where $N$ is the number of equations or unknowns in the linear system – more specifically, the number of operations is $\frac 23 N^3$, give or take a few. With $N=1089$, this means that we would have to do around $861$ million operations. This is a number that is quite feasible and it would take modern processors less than 0.1 seconds to do this. But it is clear that this isn't going to scale: If we have twenty times as many equations in the linear system (that is, twenty times as many unknowns), then it would already take 1000-10,000 seconds or on the order of an hour. Make the linear system another ten times larger, and it is clear that we can not solve it any more on a single computer.

        One can rescue the situation somewhat by realizing that only a relatively small number of entries in the matrix are nonzero – that is, the matrix is sparse. Variations of Gaussian elimination can exploit this, making the process substantially faster; we will use one such method – implemented in the SparseDirectUMFPACK class – in step-29 for the first time, among several others than come after that. These variations of Gaussian elimination might get us to problem sizes on the order of 100,000 or 200,000, but not all that much beyond that.

        -

        Instead, what we will do here is take up an idea from 1952: the Conjugate Gradient method, or in short "CG". CG is an "iterative" solver in that it forms a sequence of vectors that converge to the exact solution; in fact, after $N$ such iterations in the absence of roundoff errors it finds the exact solution if the matrix is symmetric and positive definite. The method was originally developed as another way to solve a linear system exactly, like Gaussian elimination, but as such it had few advantages and was largely forgotten for a few decades. But, when computers became powerful enough to solve problems of a size where Gaussian elimination doesn't work well any more (sometime in the 1980s), CG was rediscovered as people realized that it is well suited for large and sparse systems like the ones we get from the finite element method. This is because (i) the vectors it computes converge to the exact solution, and consequently we do not actually have to do all $N$ iterations to find the exact solution as long as we're happy with reasonably good approximations; and (ii) it only ever requires matrix-vector products, which is very useful for sparse matrices because a sparse matrix has, by definition, only ${\cal O}(N)$ entries and so a matrix-vector product can be done with ${\cal O}(N)$ effort whereas it costs $N^2$ operations to do the same for dense matrices. As a consequence, we can hope to solve linear systems with at most ${\cal O}(N^2)$ operations, and in many cases substantially fewer.

        +

        Instead, what we will do here is take up an idea from 1952: the Conjugate Gradient method, or in short "CG". CG is an "iterative" solver in that it forms a sequence of vectors that converge to the exact solution; in fact, after $N$ such iterations in the absence of roundoff errors it finds the exact solution if the matrix is symmetric and positive definite. The method was originally developed as another way to solve a linear system exactly, like Gaussian elimination, but as such it had few advantages and was largely forgotten for a few decades. But, when computers became powerful enough to solve problems of a size where Gaussian elimination doesn't work well any more (sometime in the 1980s), CG was rediscovered as people realized that it is well suited for large and sparse systems like the ones we get from the finite element method. This is because (i) the vectors it computes converge to the exact solution, and consequently we do not actually have to do all $N$ iterations to find the exact solution as long as we're happy with reasonably good approximations; and (ii) it only ever requires matrix-vector products, which is very useful for sparse matrices because a sparse matrix has, by definition, only ${\cal O}(N)$ entries and so a matrix-vector product can be done with ${\cal O}(N)$ effort whereas it costs $N^2$ operations to do the same for dense matrices. As a consequence, we can hope to solve linear systems with at most ${\cal O}(N^2)$ operations, and in many cases substantially fewer.

        Finite element codes therefore almost always use iterative solvers such as CG for the solution of the linear systems, and we will do so in this code as well. (We note that the CG method is only usable for matrices that are symmetric and positive definite; for other equations, the matrix may not have these properties and we will have to use other variations of iterative solvers such as BiCGStab or GMRES that are applicable to more general matrices.)

        -

        An important component of these iterative solvers is that we specify the tolerance with which we want to solve the linear system – in essence, a statement about the error we are willing to accept in our approximate solution. The error in an approximate solution $\tilde x$ obtained to the exact solution $x$ of a linear system $Ax=b$ is defined as $\|x-\tilde x\|$, but this is a quantity we cannot compute because we don't know the exact solution $x$. Instead, we typically consider the residual, defined as $\|b-A\tilde x\|=\|A(x-\tilde x)\|$, as a computable measure. We then let the iterative solver compute more and more accurate solutions $\tilde x$, until $\|b-A\tilde x\|\le \tau$. A practical question is what value $\tau$ should have. In most applications, setting

        \begin{align*}
   \tau = 10^{-6} \|b\|
\end{align*}

        -

        is a reasonable choice. The fact that we make $\tau$ proportional to the size (norm) of $b$ makes sure that our expectations of the accuracy in the solution are relative to the size of the solution. This makes sense: If we make the right hand side $b$ ten times larger, then the solution $x$ of $Ax=b$ will also be ten times larger, and so will $\tilde x$; we want the same number of accurate digits in $\tilde x$ as before, which means that we should also terminate when the residual $\|b-A\tilde x\|$ is ten times the original size – which is exactly what we get if we make $\tau$ proportional to $\|b\|$.

        +

        is a reasonable choice. The fact that we make $\tau$ proportional to the size (norm) of $b$ makes sure that our expectations of the accuracy in the solution are relative to the size of the solution. This makes sense: If we make the right hand side $b$ ten times larger, then the solution $x$ of $Ax=b$ will also be ten times larger, and so will $\tilde x$; we want the same number of accurate digits in $\tilde x$ as before, which means that we should also terminate when the residual $\|b-A\tilde x\|$ is ten times the original size – which is exactly what we get if we make $\tau$ proportional to $\|b\|$.

        All of this will be implemented in the Step3::solve() function in this program. As you will see, it is quite simple to set up linear solvers with deal.II: The whole function will have only three lines.

        About the implementation

        Although this is the simplest possible equation you can solve using the finite element method, this program shows the basic structure of most finite element programs and also serves as the template that almost all of the following programs will essentially follow. Specifically, the main class of this program looks like this:

        class Step3
        @@ -304,7 +304,7 @@
      • assemble_system(): This, then is where the contents of the matrix and right hand side are computed, as discussed at length in the introduction above. Since doing something with this linear system is conceptually very different from computing its entries, we separate it from the following function.
      • -solve(): This then is the function in which we compute the solution $U$ of the linear system $AU=F$. In the current program, this is a simple task since the matrix is so simple, but it will become a significant part of a program's size whenever the problem is not so trivial any more (see, for example, step-20, step-22, or step-31 once you've learned a bit more about the library).
      • +solve(): This then is the function in which we compute the solution $U$ of the linear system $AU=F$. In the current program, this is a simple task since the matrix is so simple, but it will become a significant part of a program's size whenever the problem is not so trivial any more (see, for example, step-20, step-22, or step-31 once you've learned a bit more about the library).
      • output_results(): Finally, when you have computed a solution, you probably want to do something with it. For example, you may want to output it in a format that can be visualized, or you may want to compute quantities you are interested in: say, heat fluxes in a heat exchanger, air friction coefficients of a wing, maximum bridge loads, or simply the value of the numerical solution at a point. This function is therefore the place for postprocessing your solution.
      @@ -314,7 +314,7 @@

      deal.II defines a number of integral types via alias in namespace types. (In the previous sentence, the word "integral" is used as the adjective that corresponds to the noun "integer". It shouldn't be confused with the noun "integral" that represents the area or volume under a curve or surface. The adjective "integral" is widely used in the C++ world in contexts such as "integral type", "integral constant", etc.) In particular, in this program you will see types::global_dof_index in a couple of places: an integer type that is used to denote the global index of a degree of freedom, i.e., the index of a particular degree of freedom within the DoFHandler object that is defined on top of a triangulation (as opposed to the index of a particular degree of freedom within a particular cell). For the current program (as well as almost all of the tutorial programs), you will have a few thousand to maybe a few million unknowns globally (and, for $Q_1$ elements, you will have 4 locally on each cell in 2d and 8 in 3d). Consequently, a data type that allows to store sufficiently large numbers for global DoF indices is unsigned int given that it allows to store numbers between 0 and slightly more than 4 billion (on most systems, where integers are 32-bit). In fact, this is what types::global_dof_index is.

      So, why not just use unsigned int right away? deal.II used to do this until version 7.3. However, deal.II supports very large computations (via the framework discussed in step-40) that may have more than 4 billion unknowns when spread across a few thousand processors. Consequently, there are situations where unsigned int is not sufficiently large and we need a 64-bit unsigned integral type. To make this possible, we introduced types::global_dof_index which by default is defined as simply unsigned int whereas it is possible to define it as unsigned long long int if necessary, by passing a particular flag during configuration (see the ReadMe file).

      This covers the technical aspect. But there is also a documentation purpose: everywhere in the library and codes that are built on it, if you see a place using the data type types::global_dof_index, you immediately know that the quantity that is being referenced is, in fact, a global dof index. No such meaning would be apparent if we had just used unsigned int (which may also be a local index, a boundary indicator, a material id, etc.). Immediately knowing what a variable refers to also helps avoid errors: it's quite clear that there must be a bug if you see an object of type types::global_dof_index being assigned to variable of type types::subdomain_id, even though they are both represented by unsigned integers and the compiler will, consequently, not complain.

      -

      In more practical terms what the presence of this type means is that during assembly, we create a $4\times 4$ matrix (in 2d, using a $Q_1$ element) of the contributions of the cell we are currently sitting on, and then we need to add the elements of this matrix to the appropriate elements of the global (system) matrix. For this, we need to get at the global indices of the degrees of freedom that are local to the current cell, for which we will always use the following piece of the code:

      cell->get_dof_indices (local_dof_indices);
      +

      In more practical terms what the presence of this type means is that during assembly, we create a $4\times 4$ matrix (in 2d, using a $Q_1$ element) of the contributions of the cell we are currently sitting on, and then we need to add the elements of this matrix to the appropriate elements of the global (system) matrix. For this, we need to get at the global indices of the degrees of freedom that are local to the current cell, for which we will always use the following piece of the code:

      cell->get_dof_indices (local_dof_indices);

      where local_dof_indices is declared as

      std::vector<types::global_dof_index> local_dof_indices (fe.n_dofs_per_cell());

      The name of this variable might be a bit of a misnomer – it stands for "the global indices of those degrees of freedom locally defined on the current /usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html 2024-03-17 21:57:45.155244872 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html 2024-03-17 21:57:45.155244872 +0000 @@ -218,9 +218,9 @@

      Motivation

      Adaptive local refinement is used to obtain fine meshes which are well adapted to solving the problem at hand efficiently. In short, the size of cells which produce a large error is reduced to obtain a better approximation of the solution to the problem at hand. However, a lot of problems contain anisotropic features. Prominent examples are shocks or boundary layers in compressible viscous flows. An efficient mesh approximates these features with cells of higher aspect ratio which are oriented according to the mentioned features. Using only isotropic refinement, the aspect ratios of the original mesh cells are preserved, as they are inherited by the children of a cell. Thus, starting from an isotropic mesh, a boundary layer will be refined in order to catch the rapid variation of the flow field in the wall normal direction, thus leading to cells with very small edge lengths both in normal and tangential direction. Usually, much higher edge lengths in tangential direction and thus significantly less cells could be used without a significant loss in approximation accuracy. An anisotropic refinement process can modify the aspect ratio from mother to child cells by a factor of two for each refinement step. In the course of several refinements, the aspect ratio of the fine cells can be optimized, saving a considerable number of cells and correspondingly degrees of freedom and thus computational resources, memory as well as CPU time.

      Implementation

      -

      Most of the time, when we do finite element computations, we only consider one cell at a time, for example to calculate cell contributions to the global matrix, or to interpolate boundary values. However, sometimes we have to look at how cells are related in our algorithms. Relationships between cells come in two forms: neighborship and mother-child relationship. For the case of isotropic refinement, deal.II uses certain conventions (invariants) for cell relationships that are always maintained. For example, a refined cell always has exactly $2^{dim}$ children. And (except for the 1d case), two neighboring cells may differ by at most one refinement level: they are equally often refined or one of them is exactly once more refined, leaving exactly one hanging node on the common face. Almost all of the time these invariants are only of concern in the internal implementation of the library. However, there are cases where knowledge of them is also relevant to an application program.

      -

      In the current context, it is worth noting that the kind of mesh refinement affects some of the most fundamental assumptions. Consequently, some of the usual code found in application programs will need modifications to exploit the features of meshes which were created using anisotropic refinement. For those interested in how deal.II evolved, it may be of interest that the loosening of such invariants required some incompatible changes. For example, the library used to have a member GeometryInfo<dim>::children_per_cell that specified how many children a cell has once it is refined. For isotropic refinement, this number is equal to $2^{dim}$, as mentioned above. However, for anisotropic refinement, this number does not exist, as is can be either two or four in 2D and two, four or eight in 3D, and the member GeometryInfo<dim>::children_per_cell has consequently been removed. It has now been replaced by GeometryInfo<dim>::max_children_per_cell which specifies the maximum number of children a cell can have. How many children a refined cell has was previously available as static information, but now it depends on the actual refinement state of a cell and can be retrieved using TriaAccessor::n_children(), a call that works equally well for both isotropic and anisotropic refinement. A very similar situation can be found for faces and their subfaces: the pertinent information can be queried using GeometryInfo<dim>::max_children_per_face or face->n_children(), depending on the context.

      -

      Another important aspect, and the most important one in this tutorial, is the treatment of neighbor-relations when assembling jump terms on the faces between cells. Looking at the documentation of the assemble_system functions in step-12 we notice, that we need to decide if a neighboring cell is coarser, finer or on the same (refinement) level as our current cell. These decisions do not work in the same way for anisotropic refinement as the information given by the level of a cell is not enough to completely characterize anisotropic cells; for example, are the terminal children of a two-dimensional cell that is first cut in $x$-direction and whose children are then cut in $y$-direction on level 2, or are they on level 1 as they would be if the cell would have been refined once isotropically, resulting in the same set of finest cells?

      +

      Most of the time, when we do finite element computations, we only consider one cell at a time, for example to calculate cell contributions to the global matrix, or to interpolate boundary values. However, sometimes we have to look at how cells are related in our algorithms. Relationships between cells come in two forms: neighborship and mother-child relationship. For the case of isotropic refinement, deal.II uses certain conventions (invariants) for cell relationships that are always maintained. For example, a refined cell always has exactly $2^{dim}$ children. And (except for the 1d case), two neighboring cells may differ by at most one refinement level: they are equally often refined or one of them is exactly once more refined, leaving exactly one hanging node on the common face. Almost all of the time these invariants are only of concern in the internal implementation of the library. However, there are cases where knowledge of them is also relevant to an application program.

      +

      In the current context, it is worth noting that the kind of mesh refinement affects some of the most fundamental assumptions. Consequently, some of the usual code found in application programs will need modifications to exploit the features of meshes which were created using anisotropic refinement. For those interested in how deal.II evolved, it may be of interest that the loosening of such invariants required some incompatible changes. For example, the library used to have a member GeometryInfo<dim>::children_per_cell that specified how many children a cell has once it is refined. For isotropic refinement, this number is equal to $2^{dim}$, as mentioned above. However, for anisotropic refinement, this number does not exist, as is can be either two or four in 2D and two, four or eight in 3D, and the member GeometryInfo<dim>::children_per_cell has consequently been removed. It has now been replaced by GeometryInfo<dim>::max_children_per_cell which specifies the maximum number of children a cell can have. How many children a refined cell has was previously available as static information, but now it depends on the actual refinement state of a cell and can be retrieved using TriaAccessor::n_children(), a call that works equally well for both isotropic and anisotropic refinement. A very similar situation can be found for faces and their subfaces: the pertinent information can be queried using GeometryInfo<dim>::max_children_per_face or face->n_children(), depending on the context.

      +

      Another important aspect, and the most important one in this tutorial, is the treatment of neighbor-relations when assembling jump terms on the faces between cells. Looking at the documentation of the assemble_system functions in step-12 we notice, that we need to decide if a neighboring cell is coarser, finer or on the same (refinement) level as our current cell. These decisions do not work in the same way for anisotropic refinement as the information given by the level of a cell is not enough to completely characterize anisotropic cells; for example, are the terminal children of a two-dimensional cell that is first cut in $x$-direction and whose children are then cut in $y$-direction on level 2, or are they on level 1 as they would be if the cell would have been refined once isotropically, resulting in the same set of finest cells?

      After anisotropic refinement, a coarser neighbor is not necessarily exactly one level below ours, but can pretty much have any level relative to the current one; in fact, it can even be on a higher level even though it is coarser. Thus the decisions have to be made on a different basis, whereas the intention of the decisions stays the same.

      In the following, we will discuss the cases that can happen when we want to compute contributions to the matrix (or right hand side) of the form

      \[
@@ -231,7 +231,7 @@
 <ul>
 <li>
 <p class=Finer neighbor: If we are on an active cell and want to integrate over a face $f\subset \partial K$, the first possibility is that the neighbor behind this face is more refined, i.e. has children occupying only part of the common face. In this case, the face under consideration has to be a refined one, which can determine by asking if (face->has_children()). If this is true, we need to loop over all subfaces and get the neighbors' child behind this subface, so that we can reinit an FEFaceValues object with the neighbor and an FESubfaceValues object with our cell and the respective subface.

      -

      For isotropic refinement, this kind is reasonably simple because we know that an invariant of the isotropically refined adaptive meshes in deal.II is that neighbors can only differ by exactly one refinement level. However, this isn't quite true any more for anisotropically refined meshes, in particular in 3d; there, the active cell we are interested on the other side of $f$ might not actually be a child of our neighbor, but perhaps a grandchild or even a farther offspring. Fortunately, this complexity is hidden in the internals of the library. All we need to do is call the CellAccessor::neighbor_child_on_subface() function. Still, in 3D there are two cases which need special consideration:

        +

        For isotropic refinement, this kind is reasonably simple because we know that an invariant of the isotropically refined adaptive meshes in deal.II is that neighbors can only differ by exactly one refinement level. However, this isn't quite true any more for anisotropically refined meshes, in particular in 3d; there, the active cell we are interested on the other side of $f$ might not actually be a child of our neighbor, but perhaps a grandchild or even a farther offspring. Fortunately, this complexity is hidden in the internals of the library. All we need to do is call the CellAccessor::neighbor_child_on_subface() function. Still, in 3D there are two cases which need special consideration:

        • If the neighbor is refined more than once anisotropically, it might be that here are not two or four but actually three subfaces to consider. Imagine the following refinement process of the (two-dimensional) face of the (three-dimensional) neighbor cell we are considering: first the face is refined along x, later on only the left subface is refined along y.

          -------* *---*---* *---*---*
          | | | | | | | |
          @@ -253,7 +253,7 @@
          # # # + + +
          # ## + ++
          ############# +++++++++++++
          -

          Here, the left two cells resulted from an anisotropic bisection of the mother cell in $y$-direction, whereas the right four cells resulted from a simultaneous anisotropic refinement in both the $y$- and $z$-directions. The left cell marked with # has two finer neighbors marked with +, but the actual neighbor of the left cell is the complete right mother cell, as the two cells marked with + are finer and their direct mother is the one large cell.

          +

      Here, the left two cells resulted from an anisotropic bisection of the mother cell in $y$-direction, whereas the right four cells resulted from a simultaneous anisotropic refinement in both the $y$- and $z$-directions. The left cell marked with # has two finer neighbors marked with +, but the actual neighbor of the left cell is the complete right mother cell, as the two cells marked with + are finer and their direct mother is the one large cell.

    5. However, fortunately, CellAccessor::neighbor_child_on_subface() takes care of these situations by itself, if you loop over the correct number of subfaces, in the above example this is two. The FESubfaceValues<dim>::reinit function takes care of this too, so that the resulting state is always correct. There is one little caveat, however: For reiniting the neighbors FEFaceValues object you need to know the index of the face that points toward the current cell. Usually you assume that the neighbor you get directly is as coarse or as fine as you, if it has children, thus this information can be obtained with CellAccessor::neighbor_of_neighbor(). If the neighbor is coarser, however, you would have to use the first value in CellAccessor::neighbor_of_coarser_neighbor() instead. In order to make this easy for you, there is CellAccessor::neighbor_face_no() which does the correct thing for you and returns the desired result.

      @@ -294,12 +294,12 @@

    This approach is similar to the one we have used in step-27 for hp-refinement and has the great advantage of flexibility: Any error indicator can be used in the anisotropic process, i.e. if you have quite involved a posteriori goal-oriented error indicators available you can use them as easily as a simple Kelly error estimator. The anisotropic part of the refinement process is not influenced by this choice. Furthermore, simply leaving out the third and forth steps leads to the same isotropic refinement you used to get before any anisotropic changes in deal.II or your application program. As a last advantage, working only on cells flagged for refinement results in a faster evaluation of the anisotropic indicator, which can become noticeable on finer meshes with a lot of cells if the indicator is quite involved.

    Here, we use a very simple approach which is only applicable to DG methods. The general idea is quite simple: DG methods allow the discrete solution to jump over the faces of a cell, whereas it is smooth within each cell. Of course, in the limit we expect that the jumps tend to zero as we refine the mesh and approximate the true solution better and better. Thus, a large jump across a given face indicates that the cell should be refined (at least) orthogonally to that face, whereas a small jump does not lead to this conclusion. It is possible, of course, that the exact solution is not smooth and that it also features a jump. In that case, however, a large jump over one face indicates, that this face is more or less parallel to the jump and in the vicinity of it, thus again we would expect a refinement orthogonal to the face under consideration to be effective.

    -

    The proposed indicator calculates the average jump $K_j$, i.e. the mean value of the absolute jump $|[u]|$ of the discrete solution $u$ over the two faces $f_i^j$, $i=1,2$, $j=1..d$ orthogonal to coordinate direction $j$ on the unit cell.

    +

    The proposed indicator calculates the average jump $K_j$, i.e. the mean value of the absolute jump $|[u]|$ of the discrete solution $u$ over the two faces $f_i^j$, $i=1,2$, $j=1..d$ orthogonal to coordinate direction $j$ on the unit cell.

    \[
 K_j = \frac{\sum_{i=1}^2 \int_{f_i^j}|[u]| dx}{\sum_{i=1}^2 |f_i^j|} .
 \]

    -

    If the average jump in one direction is larger than the average of the jumps in the other directions by a certain factor $\kappa$, i.e. if $K_i > \kappa \frac 1{d-1} \sum_{j=1, j\neq i}^d K_j$, the cell is refined only along that particular direction $i$, otherwise the cell is refined isotropically.

    +

    If the average jump in one direction is larger than the average of the jumps in the other directions by a certain factor $\kappa$, i.e. if $K_i > \kappa \frac 1{d-1} \sum_{j=1, j\neq i}^d K_j$, the cell is refined only along that particular direction $i$, otherwise the cell is refined isotropically.

    Such a criterion is easily generalized to systems of equations: the absolute value of the jump would be replaced by an appropriate norm of the vector-valued jump.

    The problem

    We solve the linear transport equation presented in step-12. The domain is extended to cover $[-1,1]\times[0,1]$ in 2D, where the flow field $\beta$ describes a counterclockwise quarter circle around the origin in the right half of the domain and is parallel to the x-axis in the left part of the domain. The inflow boundary is again located at $x=1$ and along the positive part of the x-axis, and the boundary conditions are chosen as in step-12.

    @@ -382,7 +382,7 @@
    Function::value_list
    virtual void value_list(const std::vector< Point< dim > > &points, std::vector< RangeNumberType > &values, const unsigned int component=0) const
    Point
    Definition point.h:112
    AssertDimension
    #define AssertDimension(dim1, dim2)
    Definition exceptions.h:1787
    -

    The flow field is chosen to be a quarter circle with counterclockwise flow direction and with the origin as midpoint for the right half of the domain with positive $x$ values, whereas the flow simply goes to the left in the left part of the domain at a velocity that matches the one coming in from the right. In the circular part the magnitude of the flow velocity is proportional to the distance from the origin. This is a difference to step-12, where the magnitude was 1 everywhere. the new definition leads to a linear variation of $\beta$ along each given face of a cell. On the other hand, the solution $u(x,y)$ is exactly the same as before.

    +

    The flow field is chosen to be a quarter circle with counterclockwise flow direction and with the origin as midpoint for the right half of the domain with positive $x$ values, whereas the flow simply goes to the left in the left part of the domain at a velocity that matches the one coming in from the right. In the circular part the magnitude of the flow velocity is proportional to the distance from the origin. This is a difference to step-12, where the magnitude was 1 everywhere. the new definition leads to a linear variation of $\beta$ along each given face of a cell. On the other hand, the solution $u(x,y)$ is exactly the same as before.

      void value_list(const std::vector<Point<dim>> &points,
      std::vector<Point<dim>> & values) const
      {
    @@ -1336,7 +1336,7 @@

    We see, that the solution on the anisotropically refined mesh is very similar to the solution obtained on the isotropically refined mesh. Thus the anisotropic indicator seems to effectively select the appropriate cells for anisotropic refinement.

    -

    The pictures also explain why the mesh is refined as it is. In the whole left part of the domain refinement is only performed along the $y$-axis of cells. In the right part of the domain the refinement is dominated by isotropic refinement, as the anisotropic feature of the solution - the jump from one to zero - is not well aligned with the mesh where the advection direction takes a turn. However, at the bottom and closest (to the observer) parts of the quarter circle this jumps again becomes more and more aligned with the mesh and the refinement algorithm reacts by creating anisotropic cells of increasing aspect ratio.

    +

    The pictures also explain why the mesh is refined as it is. In the whole left part of the domain refinement is only performed along the $y$-axis of cells. In the right part of the domain the refinement is dominated by isotropic refinement, as the anisotropic feature of the solution - the jump from one to zero - is not well aligned with the mesh where the advection direction takes a turn. However, at the bottom and closest (to the observer) parts of the quarter circle this jumps again becomes more and more aligned with the mesh and the refinement algorithm reacts by creating anisotropic cells of increasing aspect ratio.

    It might seem that the necessary alignment of anisotropic features and the coarse mesh can decrease performance significantly for real world problems. That is not wrong in general: If one were, for example, to apply anisotropic refinement to problems in which shocks appear (e.g., the equations solved in step-69), then it many cases the shock is not aligned with the mesh and anisotropic refinement will help little unless one also introduces techniques to move the mesh in alignment with the shocks. On the other hand, many steep features of solutions are due to boundary layers. In those cases, the mesh is already aligned with the anisotropic features because it is of course aligned with the boundary itself, and anisotropic refinement will almost always increase the efficiency of computations on adapted grids for these cases.

    The plain program

    /* ---------------------------------------------------------------------
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html 2024-03-17 21:57:45.283245663 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html 2024-03-17 21:57:45.279245638 +0000 @@ -174,7 +174,7 @@

    The Boussinesq equations

    This program deals with an interesting physical problem: how does a fluid (i.e., a liquid or gas) behave if it experiences differences in buoyancy caused by temperature differences? It is clear that those parts of the fluid that are hotter (and therefore lighter) are going to rise up and those that are cooler (and denser) are going to sink down with gravity.

    In cases where the fluid moves slowly enough such that inertial effects can be neglected, the equations that describe such behavior are the Boussinesq equations that read as follows:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   -\rho\; \beta \; T\; \mathbf{g},
   \\
   \nabla \cdot {\mathbf u} &=& 0,
   \\
   \frac{\partial T}{\partial t}
   +
   {\mathbf u} \cdot \nabla T
   -
   \nabla \cdot \kappa \nabla T &=& \gamma.
\end{eqnarray*}

    -

    These equations fall into the class of vector-valued problems (a toplevel overview of this topic can be found in the Handling vector valued problems module). Here, $\mathbf u$ is the velocity field, $p$ the pressure, and $T$ the temperature of the fluid. $\varepsilon ({\mathbf u}) = \frac 12
-[(\nabla{\mathbf u}) + (\nabla {\mathbf u})^T]$ is the symmetric gradient of the velocity. As can be seen, velocity and pressure solve a Stokes equation describing the motion of an incompressible fluid, an equation we have previously considered in step-22; we will draw extensively on the experience we have gained in that program, in particular with regard to efficient linear Stokes solvers.

    -

    The forcing term of the fluid motion is the buoyancy of the fluid, expressed as the product of the density $\rho$, the thermal expansion coefficient $\beta$, the temperature $T$ and the gravity vector $\mathbf{g}$ pointing downward. (A derivation of why the right hand side looks like it looks is given in the introduction of step-32.) While the first two equations describe how the fluid reacts to temperature differences by moving around, the third equation states how the fluid motion affects the temperature field: it is an advection diffusion equation, i.e., the temperature is attached to the fluid particles and advected along in the flow field, with an additional diffusion (heat conduction) term. In many applications, the diffusion coefficient is fairly small, and the temperature equation is in fact transport, not diffusion dominated and therefore in character more hyperbolic than elliptic; we will have to take this into account when developing a stable discretization.

    -

    In the equations above, the term $\gamma$ on the right hand side denotes the heat sources and may be a spatially and temporally varying function. $\eta$ and $\kappa$ denote the viscosity and diffusivity coefficients, which we assume constant for this tutorial program. The more general case when $\eta$ depends on the temperature is an important factor in physical applications: Most materials become more fluid as they get hotter (i.e., $\eta$ decreases with $T$); sometimes, as in the case of rock minerals at temperatures close to their melting point, $\eta$ may change by orders of magnitude over the typical range of temperatures.

    -

    We note that the Stokes equation above could be nondimensionalized by introducing the Rayleigh number $\mathrm{Ra}=\frac{\|\mathbf{g}\| \beta \rho}{\eta \kappa} \delta T L^3$ using a typical length scale $L$, typical temperature difference $\delta T$, density $\rho$, thermal diffusivity $\eta$, and thermal conductivity $\kappa$. $\mathrm{Ra}$ is a dimensionless number that describes the ratio of heat transport due to convection induced by buoyancy changes from temperature differences, and of heat transport due to thermal diffusion. A small Rayleigh number implies that buoyancy is not strong relative to viscosity and fluid motion $\mathbf{u}$ is slow enough so that heat diffusion $\kappa\nabla T$ is the dominant heat transport term. On the other hand, a fluid with a high Rayleigh number will show vigorous convection that dominates heat conduction.

    -

    For most fluids for which we are interested in computing thermal convection, the Rayleigh number is very large, often $10^6$ or larger. From the structure of the equations, we see that this will lead to large pressure differences and large velocities. Consequently, the convection term in the convection-diffusion equation for $T$ will also be very large and an accurate solution of this equation will require us to choose small time steps. Problems with large Rayleigh numbers are therefore hard to solve numerically for similar reasons that make solving the Navier-Stokes equations hard to solve when the Reynolds number $\mathrm{Re}$ is large.

    -

    Note that a large Rayleigh number does not necessarily involve large velocities in absolute terms. For example, the Rayleigh number in the earth mantle is larger than $10^6$. Yet the velocities are small: the material is in fact solid rock but it is so hot and under pressure that it can flow very slowly, on the order of at most a few centimeters per year. Nevertheless, this can lead to mixing over time scales of many million years, a time scale much shorter than for the same amount of heat to be distributed by thermal conductivity and a time scale of relevance to affect the evolution of the earth's interior and surface structure.

    +

    These equations fall into the class of vector-valued problems (a toplevel overview of this topic can be found in the Handling vector valued problems module). Here, $\mathbf u$ is the velocity field, $p$ the pressure, and $T$ the temperature of the fluid. $\varepsilon ({\mathbf u}) = \frac 12
+[(\nabla{\mathbf u}) + (\nabla {\mathbf u})^T]$ is the symmetric gradient of the velocity. As can be seen, velocity and pressure solve a Stokes equation describing the motion of an incompressible fluid, an equation we have previously considered in step-22; we will draw extensively on the experience we have gained in that program, in particular with regard to efficient linear Stokes solvers.

    +

    The forcing term of the fluid motion is the buoyancy of the fluid, expressed as the product of the density $\rho$, the thermal expansion coefficient $\beta$, the temperature $T$ and the gravity vector $\mathbf{g}$ pointing downward. (A derivation of why the right hand side looks like it looks is given in the introduction of step-32.) While the first two equations describe how the fluid reacts to temperature differences by moving around, the third equation states how the fluid motion affects the temperature field: it is an advection diffusion equation, i.e., the temperature is attached to the fluid particles and advected along in the flow field, with an additional diffusion (heat conduction) term. In many applications, the diffusion coefficient is fairly small, and the temperature equation is in fact transport, not diffusion dominated and therefore in character more hyperbolic than elliptic; we will have to take this into account when developing a stable discretization.

    +

    In the equations above, the term $\gamma$ on the right hand side denotes the heat sources and may be a spatially and temporally varying function. $\eta$ and $\kappa$ denote the viscosity and diffusivity coefficients, which we assume constant for this tutorial program. The more general case when $\eta$ depends on the temperature is an important factor in physical applications: Most materials become more fluid as they get hotter (i.e., $\eta$ decreases with $T$); sometimes, as in the case of rock minerals at temperatures close to their melting point, $\eta$ may change by orders of magnitude over the typical range of temperatures.

    +

    We note that the Stokes equation above could be nondimensionalized by introducing the Rayleigh number $\mathrm{Ra}=\frac{\|\mathbf{g}\| \beta \rho}{\eta \kappa} \delta T L^3$ using a typical length scale $L$, typical temperature difference $\delta T$, density $\rho$, thermal diffusivity $\eta$, and thermal conductivity $\kappa$. $\mathrm{Ra}$ is a dimensionless number that describes the ratio of heat transport due to convection induced by buoyancy changes from temperature differences, and of heat transport due to thermal diffusion. A small Rayleigh number implies that buoyancy is not strong relative to viscosity and fluid motion $\mathbf{u}$ is slow enough so that heat diffusion $\kappa\nabla T$ is the dominant heat transport term. On the other hand, a fluid with a high Rayleigh number will show vigorous convection that dominates heat conduction.

    +

    For most fluids for which we are interested in computing thermal convection, the Rayleigh number is very large, often $10^6$ or larger. From the structure of the equations, we see that this will lead to large pressure differences and large velocities. Consequently, the convection term in the convection-diffusion equation for $T$ will also be very large and an accurate solution of this equation will require us to choose small time steps. Problems with large Rayleigh numbers are therefore hard to solve numerically for similar reasons that make solving the Navier-Stokes equations hard to solve when the Reynolds number $\mathrm{Re}$ is large.

    +

    Note that a large Rayleigh number does not necessarily involve large velocities in absolute terms. For example, the Rayleigh number in the earth mantle is larger than $10^6$. Yet the velocities are small: the material is in fact solid rock but it is so hot and under pressure that it can flow very slowly, on the order of at most a few centimeters per year. Nevertheless, this can lead to mixing over time scales of many million years, a time scale much shorter than for the same amount of heat to be distributed by thermal conductivity and a time scale of relevance to affect the evolution of the earth's interior and surface structure.

    Note
    If you are interested in using the program as the basis for your own experiments, you will also want to take a look at its continuation in step-32. Furthermore, step-32 later was developed into the much larger open source code ASPECT (see https://aspect.geodynamics.org/ ) that can solve realistic problems and that you may want to investigate before trying to morph step-31 into something that can solve whatever you want to solve.

    Boundary and initial conditions

    -

    Since the Boussinesq equations are derived under the assumption that inertia of the fluid's motion does not play a role, the flow field is at each time entirely determined by buoyancy difference at that time, not by the flow field at previous times. This is reflected by the fact that the first two equations above are the steady state Stokes equation that do not contain a time derivative. Consequently, we do not need initial conditions for either velocities or pressure. On the other hand, the temperature field does satisfy an equation with a time derivative, so we need initial conditions for $T$.

    -

    As for boundary conditions: if $\kappa>0$ then the temperature satisfies a second order differential equation that requires boundary data all around the boundary for all times. These can either be a prescribed boundary temperature $T|_{\partial\Omega}=T_b$ (Dirichlet boundary conditions), or a prescribed thermal flux $\mathbf{n}\cdot\kappa\nabla
-T|_{\partial\Omega}=\phi$; in this program, we will use an insulated boundary condition, i.e., prescribe no thermal flux: $\phi=0$.

    -

    Similarly, the velocity field requires us to pose boundary conditions. These may be no-slip no-flux conditions $\mathbf{u}=0$ on $\partial\Omega$ if the fluid sticks to the boundary, or no normal flux conditions $\mathbf n \cdot \mathbf
-u = 0$ if the fluid can flow along but not across the boundary, or any number of other conditions that are physically reasonable. In this program, we will use no normal flux conditions.

    +

    Since the Boussinesq equations are derived under the assumption that inertia of the fluid's motion does not play a role, the flow field is at each time entirely determined by buoyancy difference at that time, not by the flow field at previous times. This is reflected by the fact that the first two equations above are the steady state Stokes equation that do not contain a time derivative. Consequently, we do not need initial conditions for either velocities or pressure. On the other hand, the temperature field does satisfy an equation with a time derivative, so we need initial conditions for $T$.

    +

    As for boundary conditions: if $\kappa>0$ then the temperature satisfies a second order differential equation that requires boundary data all around the boundary for all times. These can either be a prescribed boundary temperature $T|_{\partial\Omega}=T_b$ (Dirichlet boundary conditions), or a prescribed thermal flux $\mathbf{n}\cdot\kappa\nabla
+T|_{\partial\Omega}=\phi$; in this program, we will use an insulated boundary condition, i.e., prescribe no thermal flux: $\phi=0$.

    +

    Similarly, the velocity field requires us to pose boundary conditions. These may be no-slip no-flux conditions $\mathbf{u}=0$ on $\partial\Omega$ if the fluid sticks to the boundary, or no normal flux conditions $\mathbf n \cdot \mathbf
+u = 0$ if the fluid can flow along but not across the boundary, or any number of other conditions that are physically reasonable. In this program, we will use no normal flux conditions.

    Solution approach

    -

    Like the equations solved in step-21, we here have a system of differential-algebraic equations (DAE): with respect to the time variable, only the temperature equation is a differential equation whereas the Stokes system for $\mathbf{u}$ and $p$ has no time-derivatives and is therefore of the sort of an algebraic constraint that has to hold at each time instant. The main difference to step-21 is that the algebraic constraint there was a mixed Laplace system of the form

\begin{eqnarray*}
   \mathbf u + {\mathbf K}\lambda \nabla p &=& 0, \\
   \nabla\cdot \mathbf u &=& f,
\end{eqnarray*}

    where now we have a Stokes system

\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=& f, \\
   \nabla\cdot \mathbf u &=& 0,
\end{eqnarray*}

    -

    where $\nabla \cdot \eta \varepsilon (\cdot)$ is an operator similar to the Laplacian $\Delta$ applied to a vector field.

    +

    where $\nabla \cdot \eta \varepsilon (\cdot)$ is an operator similar to the Laplacian $\Delta$ applied to a vector field.

    Given the similarity to what we have done in step-21, it may not come as a surprise that we choose a similar approach, although we will have to make adjustments for the change in operator in the top-left corner of the differential operator.

    Time stepping

    -

    The structure of the problem as a DAE allows us to use the same strategy as we have already used in step-21, i.e., we use a time lag scheme: we first solve the temperature equation (using an extrapolated velocity field), and then insert the new temperature solution into the right hand side of the velocity equation. The way we implement this in our code looks at things from a slightly different perspective, though. We first solve the Stokes equations for velocity and pressure using the temperature field from the previous time step, which means that we get the velocity for the previous time step. In other words, we first solve the Stokes system for time step $n - 1$ as

\begin{eqnarray*}
   -\nabla \cdot (2\eta \varepsilon ({\mathbf u}^{n-1})) + \nabla p^{n-1} &=&
   -\rho\; \beta \; T^{n-1} \mathbf{g},
   \\
   \nabla \cdot {\mathbf u}^{n-1} &=& 0,
\end{eqnarray*}

    -

    and then the temperature equation with an extrapolated velocity field to time $n$.

    -

    In contrast to step-21, we'll use a higher order time stepping scheme here, namely the Backward Differentiation Formula scheme of order 2 (BDF-2 in short) that replaces the time derivative $\frac{\partial T}{\partial t}$ by the (one-sided) difference quotient $\frac{\frac 32 T^{n}-2T^{n-1}+\frac 12 T^{n-2}}{k}$ with $k$ the time step size. This gives the discretized-in-time temperature equation

    and then the temperature equation with an extrapolated velocity field to time $n$.

    +

    In contrast to step-21, we'll use a higher order time stepping scheme here, namely the Backward Differentiation Formula scheme of order 2 (BDF-2 in short) that replaces the time derivative $\frac{\partial T}{\partial t}$ by the (one-sided) difference quotient $\frac{\frac 32 T^{n}-2T^{n-1}+\frac 12 T^{n-2}}{k}$ with $k$ the time step size. This gives the discretized-in-time temperature equation

\begin{eqnarray*}
   \frac 32 T^n
   -
   k\nabla \cdot \kappa \nabla T^n
   &=&
   2T^{n-1}
   -
   \frac 12 T^{n-2}
   -
   k(2{\mathbf u}^{n-1} - {\mathbf u}^{n-2} ) \cdot \nabla (2T^{n-1}-T^{n-2})
   +
   k\gamma.
\end{eqnarray*}

    -

    Note how the temperature equation is solved semi-explicitly: diffusion is treated implicitly whereas advection is treated explicitly using an extrapolation (or forward-projection) of temperature and velocity, including the just-computed velocity ${\mathbf u}^{n-1}$. The forward-projection to the current time level $n$ is derived from a Taylor expansion, $T^n
Note how the temperature equation is solved semi-explicitly: diffusion is treated implicitly whereas advection is treated explicitly using an extrapolation (or forward-projection) of temperature and velocity, including the just-computed velocity ${\mathbf u}^{n-1}$. The forward-projection to the current time level $n$ is derived from a Taylor expansion, $T^n
 \approx T^{n-1} + k_n \frac{\partial T}{\partial t} \approx T^{n-1} + k_n
-\frac{T^{n-1}-T^{n-2}}{k_n} = 2T^{n-1}-T^{n-2}$. We need this projection for maintaining the order of accuracy of the BDF-2 scheme. In other words, the temperature fields we use in the explicit right hand side are second order approximations of the current temperature field — not quite an explicit time stepping scheme, but by character not too far away either.

    -

    The introduction of the temperature extrapolation limits the time step by a Courant-Friedrichs-Lewy (CFL) condition just like it was in step-21. (We wouldn't have had that stability condition if we treated the advection term implicitly since the BDF-2 scheme is A-stable, at the price that we needed to build a new temperature matrix at each time step.) We will discuss the exact choice of time step in the results section, but for the moment of importance is that this CFL condition means that the time step size $k$ may change from time step to time step, and that we have to modify the above formula slightly. If $k_n,k_{n-1}$ are the time steps sizes of the current and previous time step, then we use the approximations

    -\begin{align*}
+\frac{T^{n-1}-T^{n-2}}{k_n} = 2T^{n-1}-T^{n-2}$. We need this projection for maintaining the order of accuracy of the BDF-2 scheme. In other words, the temperature fields we use in the explicit right hand side are second order approximations of the current temperature field — not quite an explicit time stepping scheme, but by character not too far away either.

    +

    The introduction of the temperature extrapolation limits the time step by a Courant-Friedrichs-Lewy (CFL) condition just like it was in step-21. (We wouldn't have had that stability condition if we treated the advection term implicitly since the BDF-2 scheme is A-stable, at the price that we needed to build a new temperature matrix at each time step.) We will discuss the exact choice of time step in the results section, but for the moment of importance is that this CFL condition means that the time step size $k$ may change from time step to time step, and that we have to modify the above formula slightly. If $k_n,k_{n-1}$ are the time steps sizes of the current and previous time step, then we use the approximations

\begin{align*}
 \frac{\partial T}{\partial t} \approx
  \frac 1{k_n}
  \left(
        \frac{2k_n+k_{n-1}}{k_n+k_{n-1}} T^{n}
        -
        \frac{k_n+k_{n-1}}{k_{n-1}} T^{n-1}
        +
        \frac{k_n^2}{k_{n-1}(k_n+k_{n-1})} T^{n-2}
  \right)
 \end{align*}

    and

\begin{align*}
 T^n \approx
    T^{n-1} + k_n \frac{\partial T}{\partial t}
    \approx
    T^{n-1} + k_n
    \frac{T^{n-1}-T^{n-2}}{k_{n-1}}
    =
    \left(1+\frac{k_n}{k_{n-1}}\right)T^{n-1}-\frac{k_n}{k_{n-1}}T^{n-2},
\end{align*}

    and above equation is generalized as follows:

\begin{eqnarray*}
   \frac{2k_n+k_{n-1}}{k_n+k_{n-1}} T^n
   -
   k_n\nabla \cdot \kappa \nabla T^n
   &=&
   \frac{k_n+k_{n-1}}{k_{n-1}} T^{n-1}
   -
   \frac{k_n^2}{k_{n-1}(k_n+k_{n-1})} T^{n-2}
   -
   k_n{\mathbf u}^{*,n} \cdot \nabla T^{*,n}
   +
   k_n\gamma,
\end{eqnarray*}

    -

    where ${(\cdot)}^{*,n} = \left(1+\frac{k_n}{k_{n-1}}\right)(\cdot)^{n-1} -
-\frac{k_n}{k_{n-1}}(\cdot)^{n-2}$ denotes the extrapolation of velocity $\mathbf u$ and temperature $T$ to time level $n$, using the values at the two previous time steps. That's not an easy to read equation, but will provide us with the desired higher order accuracy. As a consistency check, it is easy to verify that it reduces to the same equation as above if $k_n=k_{n-1}$.

    -

    As a final remark we note that the choice of a higher order time stepping scheme of course forces us to keep more time steps in memory; in particular, we here will need to have $T^{n-2}$ around, a vector that we could previously discard. This seems like a nuisance that we were able to avoid previously by using only a first order time stepping scheme, but as we will see below when discussing the topic of stabilization, we will need this vector anyway and so keeping it around for time discretization is essentially for free and gives us the opportunity to use a higher order scheme.

    +

    where ${(\cdot)}^{*,n} = \left(1+\frac{k_n}{k_{n-1}}\right)(\cdot)^{n-1} -
+\frac{k_n}{k_{n-1}}(\cdot)^{n-2}$ denotes the extrapolation of velocity $\mathbf u$ and temperature $T$ to time level $n$, using the values at the two previous time steps. That's not an easy to read equation, but will provide us with the desired higher order accuracy. As a consistency check, it is easy to verify that it reduces to the same equation as above if $k_n=k_{n-1}$.

    +

    As a final remark we note that the choice of a higher order time stepping scheme of course forces us to keep more time steps in memory; in particular, we here will need to have $T^{n-2}$ around, a vector that we could previously discard. This seems like a nuisance that we were able to avoid previously by using only a first order time stepping scheme, but as we will see below when discussing the topic of stabilization, we will need this vector anyway and so keeping it around for time discretization is essentially for free and gives us the opportunity to use a higher order scheme.

    Weak form and space discretization for the Stokes part

    -

    Like solving the mixed Laplace equations, solving the Stokes equations requires us to choose particular pairs of finite elements for velocities and pressure variables. Because this has already been discussed in step-22, we only cover this topic briefly: Here, we use the stable pair $Q_{p+1}^d \times Q_p, p\ge 1$. These are continuous elements, so we can form the weak form of the Stokes equation without problem by integrating by parts and substituting continuous functions by their discrete counterparts:

\begin{eqnarray*}
   (\nabla {\mathbf v}_h, 2\eta \varepsilon ({\mathbf u}^{n-1}_h))
   -
   (\nabla \cdot {\mathbf v}_h, p^{n-1}_h)
   &=&
   -({\mathbf v}_h, \rho\; \beta \; T^{n-1}_h \mathbf{g}),
   \\
   (q_h, \nabla \cdot {\mathbf u}^{n-1}_h) &=& 0,
\end{eqnarray*}

    -

    for all test functions $\mathbf v_h, q_h$. The first term of the first equation is considered as the inner product between tensors, i.e. $(\nabla {\mathbf v}_h, \eta \varepsilon ({\mathbf u}^{n-1}_h))_\Omega
for all test functions $\mathbf v_h, q_h$. The first term of the first equation is considered as the inner product between tensors, i.e. $(\nabla {\mathbf v}_h, \eta \varepsilon ({\mathbf u}^{n-1}_h))_\Omega
  = \int_\Omega \sum_{i,j=1}^d [\nabla {\mathbf v}_h]_{ij}
-           \eta [\varepsilon ({\mathbf u}^{n-1}_h)]_{ij}\, dx$. Because the second tensor in this product is symmetric, the anti-symmetric component of $\nabla {\mathbf v}_h$ plays no role and it leads to the entirely same form if we use the symmetric gradient of $\mathbf v_h$ instead. Consequently, the formulation we consider and that we implement is

    -\begin{eqnarray*}
+           \eta [\varepsilon ({\mathbf u}^{n-1}_h)]_{ij}\, dx$. Because the second tensor in this product is symmetric, the anti-symmetric component of $\nabla {\mathbf v}_h$ plays no role and it leads to the entirely same form if we use the symmetric gradient of $\mathbf v_h$ instead. Consequently, the formulation we consider and that we implement is

\begin{eqnarray*}
   (\varepsilon({\mathbf v}_h), 2\eta \varepsilon ({\mathbf u}^{n-1}_h))
   -
   (\nabla \cdot {\mathbf v}_h, p^{n-1}_h)
@@ -309,32 +309,32 @@
   -({\mathbf v}_h, \rho\; \beta \; T^{n-1}_h \mathbf{g}),
   \\
   (q_h, \nabla \cdot {\mathbf u}^{n-1}_h) &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3984.png"/>

    This is exactly the same as what we already discussed in step-22 and there is not much more to say about this here.

    Stabilization, weak form and space discretization for the temperature equation

    The more interesting question is what to do with the temperature advection-diffusion equation. By default, not all discretizations of this equation are equally stable unless we either do something like upwinding, stabilization, or all of this. One way to achieve this is to use discontinuous elements (i.e., the FE_DGQ class that we used, for example, in the discretization of the transport equation in step-12, or in discretizing the pressure in step-20 and step-21) and to define a flux at the interface between cells that takes into account upwinding. If we had a pure advection problem this would probably be the simplest way to go. However, here we have some diffusion as well, and the discretization of the Laplace operator with discontinuous elements is cumbersome because of the significant number of additional terms that need to be integrated on each face between cells. Discontinuous elements also have the drawback that the use of numerical fluxes introduces an additional numerical diffusion that acts everywhere, whereas we would really like to minimize the effect of numerical diffusion to a minimum and only apply it where it is necessary to stabilize the scheme.

    A better alternative is therefore to add some nonlinear viscosity to the model. Essentially, what this does is to transform the temperature equation from the form

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \frac{\partial T}{\partial t}
   +
   {\mathbf u} \cdot \nabla T
   -
   \nabla \cdot \kappa \nabla T &=& \gamma
-\end{eqnarray*} +\end{eqnarray*}" src="form_3985.png"/>

    to something like

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \frac{\partial T}{\partial t}
   +
   {\mathbf u} \cdot \nabla T
   -
   \nabla \cdot (\kappa+\nu(T)) \nabla T &=& \gamma,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3986.png"/> /usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html 2024-03-17 21:57:45.463246775 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html 2024-03-17 21:57:45.475246848 +0000 @@ -166,58 +166,58 @@

    In addition to these changes, we also use a slightly different preconditioner, and we will have to make a number of changes that have to do with the fact that we want to solve a realistic problem here, not a model problem. The latter, in particular, will require that we think about scaling issues as well as what all those parameters and coefficients in the equations under consideration actually mean. We will discuss first the issues that affect changes in the mathematical formulation and solver structure, then how to parallelize things, and finally the actual testcase we will consider.

    Using the "right" pressure

    In step-31, we used the following Stokes model for the velocity and pressure field:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   -\rho \; \beta \; T \mathbf{g},
   \\
   \nabla \cdot {\mathbf u} &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4126.png"/>

    -

    The right hand side of the first equation appears a wee bit unmotivated. Here's how things should really be. We need the external forces that act on the fluid, which we assume are given by gravity only. In the current case, we assume that the fluid does expand slightly for the purposes of this gravity force, but not enough that we need to modify the incompressibility condition (the second equation). What this means is that for the purpose of the right hand side, we can assume that $\rho=\rho(T)$. An assumption that may not be entirely justified is that we can assume that the changes of density as a function of temperature are small, leading to an expression of the form $\rho(T) = \rho_{\text{ref}}
-[1-\beta(T-T_{\text{ref}})]$, i.e., the density equals $\rho_{\text{ref}}$ at reference temperature and decreases linearly as the temperature increases (as the material expands). The force balance equation then looks properly written like this:

    -\begin{eqnarray*}
+<p> The right hand side of the first equation appears a wee bit unmotivated. Here's how things should really be. We need the external forces that act on the fluid, which we assume are given by gravity only. In the current case, we assume that the fluid does expand slightly for the purposes of this gravity force, but not enough that we need to modify the incompressibility condition (the second equation). What this means is that for the purpose of the right hand side, we can assume that <picture><source srcset=$\rho=\rho(T)$. An assumption that may not be entirely justified is that we can assume that the changes of density as a function of temperature are small, leading to an expression of the form $\rho(T) = \rho_{\text{ref}}
+[1-\beta(T-T_{\text{ref}})]$, i.e., the density equals $\rho_{\text{ref}}$ at reference temperature and decreases linearly as the temperature increases (as the material expands). The force balance equation then looks properly written like this:

    +\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   \rho_{\text{ref}} [1-\beta(T-T_{\text{ref}})] \mathbf{g}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4130.png"/>

    -

    Now note that the gravity force results from a gravity potential as $\mathbf g=-\nabla \varphi$, so that we can re-write this as follows:

    -\begin{eqnarray*}
+<p> Now note that the gravity force results from a gravity potential as <picture><source srcset=$\mathbf g=-\nabla \varphi$, so that we can re-write this as follows:

    +\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   -\rho_{\text{ref}} \; \beta\; T\; \mathbf{g}
   -\rho_{\text{ref}} [1+\beta T_{\text{ref}}] \nabla\varphi.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4132.png"/>

    -

    The second term on the right is time independent, and so we could introduce a new "dynamic" pressure $p_{\text{dyn}}=p+\rho_{\text{ref}}
-[1+\beta T_{\text{ref}}] \varphi=p_{\text{total}}-p_{\text{static}}$ with which the Stokes equations would read:

    -\begin{eqnarray*}
+<p> The second term on the right is time independent, and so we could introduce a new $p_{\text{dyn}}=p+\rho_{\text{ref}}
+[1+\beta T_{\text{ref}}] \varphi=p_{\text{total}}-p_{\text{static}}$ with which the Stokes equations would read:

    +\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p_{\text{dyn}} &=&
   -\rho_{\text{ref}} \; \beta \; T \; \mathbf{g},
   \\
   \nabla \cdot {\mathbf u} &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4134.png"/>

    This is exactly the form we used in step-31, and it was appropriate to do so because all changes in the fluid flow are only driven by the dynamic pressure that results from temperature differences. (In other words: Any contribution to the right hand side that results from taking the gradient of a scalar field have no effect on the velocity field.)

    On the other hand, we will here use the form of the Stokes equations that considers the total pressure instead:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   \rho(T)\; \mathbf{g},
   \\
   \nabla \cdot {\mathbf u} &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4135.png"/>

    There are several advantages to this:

    -
    Note
    There is, however, a downside to this procedure. In the earth, the dynamic pressure is several orders of magnitude smaller than the total pressure. If we use the equations above and solve all variables to, say, 4 digits of accuracy, then we may be able to get the velocity and the total pressure right, but we will have no accuracy at all if we compute the dynamic pressure by subtracting from the total pressure the static part $p_\text{static}=\rho_{\text{ref}}
-[1+\beta T_{\text{ref}}] \varphi$. If, for example, the dynamic pressure is six orders of magnitude smaller than the static pressure, then we need to solve the overall pressure to at least seven digits of accuracy to get anything remotely accurate. That said, in practice this turns out not to be a limiting factor.
    +
    Note
    There is, however, a downside to this procedure. In the earth, the dynamic pressure is several orders of magnitude smaller than the total pressure. If we use the equations above and solve all variables to, say, 4 digits of accuracy, then we may be able to get the velocity and the total pressure right, but we will have no accuracy at all if we compute the dynamic pressure by subtracting from the total pressure the static part $p_\text{static}=\rho_{\text{ref}}
+[1+\beta T_{\text{ref}}] \varphi$. If, for example, the dynamic pressure is six orders of magnitude smaller than the static pressure, then we need to solve the overall pressure to at least seven digits of accuracy to get anything remotely accurate. That said, in practice this turns out not to be a limiting factor.

    The scaling of discretized equations

    Remember that we want to solve the following set of equations:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   \rho(T) \mathbf{g},
   \\
@@ -228,11 +228,11 @@
   {\mathbf u} \cdot \nabla T
   -
   \nabla \cdot \kappa \nabla T &=& \gamma,
-\end{eqnarray*} +\end{eqnarray*}" src="form_4138.png"/>

    augmented by appropriate boundary and initial conditions. As discussed in step-31, we will solve this set of equations by solving for a Stokes problem first in each time step, and then moving the temperature equation forward by one time interval.

    The problem under consideration in this current section is with the Stokes problem: if we discretize it as usual, we get a linear system

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   M \; X
   =
   \left(\begin{array}{cc}
@@ -247,10 +247,10 @@
   \end{array}\right)
   =
   F
-\end{eqnarray*} +\end{eqnarray*}" src="form_4139.png"/>

    which in this program we will solve with a FGMRES solver. This solver iterates until the residual of these linear equations is below a certain tolerance, i.e., until

    -\[
+<picture><source srcset=\[
   \left\|
   \left(\begin{array}{c}
     F_U - A U^{(k)} - B P^{(k)}
@@ -259,35 +259,35 @@
   \end{array}\right)
   \right\|
   < \text{Tol}.
-\] +\]" src="form_4140.png"/>

    -

    This does not make any sense from the viewpoint of physical units: the quantities involved here have physical units so that the first part of the residual has units $\frac{\text{Pa}}{\text{m}}
-\text{m}^{\text{dim}}$ (most easily established by considering the term $(\nabla \cdot \mathbf v, p)_{\Omega}$ and considering that the pressure has units $\text{Pa}=\frac{\text{kg}}{\text{m}\;\text{s}^2}$ and the integration yields a factor of $\text{m}^{\text{dim}}$), whereas the second part of the residual has units $\frac{\text{m}^{\text{dim}}}{\text{s}}$. Taking the norm of this residual vector would yield a quantity with units $\text{m}^{\text{dim}-1} \sqrt{\left(\text{Pa}\right)^2 +
-       \left(\frac{\text{m}}{\text{s}}\right)^2}$. This, quite obviously, does not make sense, and we should not be surprised that doing so is eventually going to come back hurting us.

    -

    So why is this an issue here, but not in step-31? The reason back there is that everything was nicely balanced: velocities were on the order of one, the pressure likewise, the viscosity was one, and the domain had a diameter of $\sqrt{2}$. As a result, while nonsensical, nothing bad happened. On the other hand, as we will explain below, things here will not be that simply scaled: $\eta$ will be around $10^{21}$, velocities on the order of $10^{-8}$, pressure around $10^8$, and the diameter of the domain is $10^7$. In other words, the order of magnitude for the first equation is going to be $\eta\text{div}\varepsilon(\mathbf u) \approx 10^{21} \frac{10^{-8}}{(10^7)^2}
-\approx 10^{-1}$, whereas the second equation will be around $\text{div}{\mathbf u}\approx \frac{10^{-8}}{10^7} \approx 10^{-15}$. Well, so what this will lead to is this: if the solver wants to make the residual small, it will almost entirely focus on the first set of equations because they are so much bigger, and ignore the divergence equation that describes mass conservation. That's exactly what happens: unless we set the tolerance to extremely small values, the resulting flow field is definitely not divergence free. As an auxiliary problem, it turns out that it is difficult to find a tolerance that always works; in practice, one often ends up with a tolerance that requires 30 or 40 iterations for most time steps, and 10,000 for some others.

    -

    So what's a numerical analyst to do in a case like this? The answer is to start at the root and first make sure that everything is mathematically consistent first. In our case, this means that if we want to solve the system of Stokes equations jointly, we have to scale them so that they all have the same physical dimensions. In our case, this means multiplying the second equation by something that has units $\frac{\text{Pa}\;\text{s}}{\text{m}}$; one choice is to multiply with $\frac{\eta}{L}$ where $L$ is a typical lengthscale in our domain (which experiments show is best chosen to be the diameter of plumes — around 10 km — rather than the diameter of the domain). Using these numbers for $\eta$ and $L$, this factor is around $10^{17}$. So, we now get this for the Stokes system:

    -\begin{eqnarray*}
+<p> This does not make any sense from the viewpoint of physical units: the quantities involved here have physical units so that the first part of the residual has units <picture><source srcset=$\frac{\text{Pa}}{\text{m}}
+\text{m}^{\text{dim}}$ (most easily established by considering the term $(\nabla \cdot \mathbf v, p)_{\Omega}$ and considering that the pressure has units $\text{Pa}=\frac{\text{kg}}{\text{m}\;\text{s}^2}$ and the integration yields a factor of $\text{m}^{\text{dim}}$), whereas the second part of the residual has units $\frac{\text{m}^{\text{dim}}}{\text{s}}$. Taking the norm of this residual vector would yield a quantity with units $\text{m}^{\text{dim}-1} \sqrt{\left(\text{Pa}\right)^2 +
+       \left(\frac{\text{m}}{\text{s}}\right)^2}$. This, quite obviously, does not make sense, and we should not be surprised that doing so is eventually going to come back hurting us.

    +

    So why is this an issue here, but not in step-31? The reason back there is that everything was nicely balanced: velocities were on the order of one, the pressure likewise, the viscosity was one, and the domain had a diameter of $\sqrt{2}$. As a result, while nonsensical, nothing bad happened. On the other hand, as we will explain below, things here will not be that simply scaled: $\eta$ will be around $10^{21}$, velocities on the order of $10^{-8}$, pressure around $10^8$, and the diameter of the domain is $10^7$. In other words, the order of magnitude for the first equation is going to be $\eta\text{div}\varepsilon(\mathbf u) \approx 10^{21} \frac{10^{-8}}{(10^7)^2}
+\approx 10^{-1}$, whereas the second equation will be around $\text{div}{\mathbf u}\approx \frac{10^{-8}}{10^7} \approx 10^{-15}$. Well, so what this will lead to is this: if the solver wants to make the residual small, it will almost entirely focus on the first set of equations because they are so much bigger, and ignore the divergence equation that describes mass conservation. That's exactly what happens: unless we set the tolerance to extremely small values, the resulting flow field is definitely not divergence free. As an auxiliary problem, it turns out that it is difficult to find a tolerance that always works; in practice, one often ends up with a tolerance that requires 30 or 40 iterations for most time steps, and 10,000 for some others.

    +

    So what's a numerical analyst to do in a case like this? The answer is to start at the root and first make sure that everything is mathematically consistent first. In our case, this means that if we want to solve the system of Stokes equations jointly, we have to scale them so that they all have the same physical dimensions. In our case, this means multiplying the second equation by something that has units $\frac{\text{Pa}\;\text{s}}{\text{m}}$; one choice is to multiply with $\frac{\eta}{L}$ where $L$ is a typical lengthscale in our domain (which experiments show is best chosen to be the diameter of plumes — around 10 km — rather than the diameter of the domain). Using these numbers for $\eta$ and $L$, this factor is around $10^{17}$. So, we now get this for the Stokes system:

    +\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   \rho(T) \; \mathbf{g},
   \\
   \frac{\eta}{L} \nabla \cdot {\mathbf u} &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4156.png"/>

    -

    The trouble with this is that the result is not symmetric any more (we have $\frac{\eta}{L} \nabla \cdot$ at the bottom left, but not its transpose operator at the top right). This, however, can be cured by introducing a scaled pressure $\hat p = \frac{L}{\eta}p$, and we get the scaled equations

    -\begin{eqnarray*}
+<p> The trouble with this is that the result is not symmetric any more (we have <picture><source srcset=$\frac{\eta}{L} \nabla \cdot$ at the bottom left, but not its transpose operator at the top right). This, however, can be cured by introducing a scaled pressure $\hat p = \frac{L}{\eta}p$, and we get the scaled equations

    +\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) +
   \nabla \left(\frac{\eta}{L} \hat p\right) &=&
   \rho(T) \; \mathbf{g},
   \\
   \frac{\eta}{L} \nabla \cdot {\mathbf u} &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4159.png"/>

    -

    This is now symmetric. Obviously, we can easily recover the original pressure $p$ from the scaled pressure $\hat p$ that we compute as a result of this procedure.

    -

    In the program below, we will introduce a factor EquationData::pressure_scaling that corresponds to $\frac{\eta}{L}$, and we will use this factor in the assembly of the system matrix and preconditioner. Because it is annoying and error prone, we will recover the unscaled pressure immediately following the solution of the linear system, i.e., the solution vector's pressure component will immediately be unscaled to retrieve the physical pressure. Since the solver uses the fact that we can use a good initial guess by extrapolating the previous solutions, we also have to scale the pressure immediately before solving.

    +

    This is now symmetric. Obviously, we can easily recover the original pressure $p$ from the scaled pressure $\hat p$ that we compute as a result of this procedure.

    +

    In the program below, we will introduce a factor EquationData::pressure_scaling that corresponds to $\frac{\eta}{L}$, and we will use this factor in the assembly of the system matrix and preconditioner. Because it is annoying and error prone, we will recover the unscaled pressure immediately following the solution of the linear system, i.e., the solution vector's pressure component will immediately be unscaled to retrieve the physical pressure. Since the solver uses the fact that we can use a good initial guess by extrapolating the previous solutions, we also have to scale the pressure immediately before solving.

    Changes to the Stokes preconditioner and solver

    -

    In this tutorial program, we apply a variant of the preconditioner used in step-31. That preconditioner was built to operate on the system matrix $M$ in block form such that the product matrix

    -\begin{eqnarray*}
+<p>In this tutorial program, we apply a variant of the preconditioner used in <a class=step-31. That preconditioner was built to operate on the system matrix $M$ in block form such that the product matrix

    +\begin{eqnarray*}
   P^{-1} M
   =
   \left(\begin{array}{cc}
@@ -296,24 +296,24 @@
   \left(\begin{array}{cc}
     A & B^T \\ B & 0
   \end{array}\right)
-\end{eqnarray*} +\end{eqnarray*}" src="form_4161.png"/>

    -

    is of a form that Krylov-based iterative solvers like GMRES can solve in a few iterations. We then replaced the exact inverse of $A$ by the action of an AMG preconditioner $\tilde{A}$ based on a vector Laplace matrix, approximated the Schur complement $S = B A^{-1} B^T$ by a mass matrix $M_p$ on the pressure space and wrote an InverseMatrix class for implementing the action of $M_p^{-1}\approx S^{-1}$ on vectors. In the InverseMatrix class, we used a CG solve with an incomplete Cholesky (IC) preconditioner for performing the inner solves.

    -

    An observation one can make is that we use just the action of a preconditioner for approximating the velocity inverse $A^{-1}$ (and the outer GMRES iteration takes care of the approximate character of the inverse), whereas we use a more or less exact inverse for $M_p^{-1}$, realized by a fully converged CG solve. This appears unbalanced, but there's system to this madness: almost all the effort goes into the upper left block to which we apply the AMG preconditioner, whereas even an exact inversion of the pressure mass matrix costs basically nothing. Consequently, if it helps us reduce the overall number of iterations somewhat, then this effort is well spent.

    +

    is of a form that Krylov-based iterative solvers like GMRES can solve in a few iterations. We then replaced the exact inverse of $A$ by the action of an AMG preconditioner $\tilde{A}$ based on a vector Laplace matrix, approximated the Schur complement $S = B A^{-1} B^T$ by a mass matrix $M_p$ on the pressure space and wrote an InverseMatrix class for implementing the action of $M_p^{-1}\approx S^{-1}$ on vectors. In the InverseMatrix class, we used a CG solve with an incomplete Cholesky (IC) preconditioner for performing the inner solves.

    +

    An observation one can make is that we use just the action of a preconditioner for approximating the velocity inverse $A^{-1}$ (and the outer GMRES iteration takes care of the approximate character of the inverse), whereas we use a more or less exact inverse for $M_p^{-1}$, realized by a fully converged CG solve. This appears unbalanced, but there's system to this madness: almost all the effort goes into the upper left block to which we apply the AMG preconditioner, whereas even an exact inversion of the pressure mass matrix costs basically nothing. Consequently, if it helps us reduce the overall number of iterations somewhat, then this effort is well spent.

    That said, even though the solver worked well for step-31, we have a problem here that is a bit more complicated (cells are deformed, the pressure varies by orders of magnitude, and we want to plan ahead for more complicated physics), and so we'll change a few things slightly:

    -

    As a final note, let us remark that in step-31 we computed the Schur complement $S=B A^{-1} B^T$ by approximating $-\text{div}(-\eta\Delta)^{-1}\nabla \approx \frac 1{\eta} \mathbf{1}$. Now, however, we have re-scaled the $B$ and $B^T$ operators. So $S$ should now approximate $-\frac{\eta}{L}\text{div}(-\eta\Delta)^{-1}\nabla \frac{\eta}{L} \approx
-\left(\frac{\eta}{L}\right)^2 \frac 1{\eta} \mathbf{1}$. We use the discrete form of the right hand side of this as our approximation $\tilde S$ to $S$.

    +

    As a final note, let us remark that in step-31 we computed the Schur complement $S=B A^{-1} B^T$ by approximating $-\text{div}(-\eta\Delta)^{-1}\nabla \approx \frac 1{\eta} \mathbf{1}$. Now, however, we have re-scaled the $B$ and $B^T$ operators. So $S$ should now approximate $-\frac{\eta}{L}\text{div}(-\eta\Delta)^{-1}\nabla \frac{\eta}{L} \approx
+\left(\frac{\eta}{L}\right)^2 \frac 1{\eta} \mathbf{1}$. We use the discrete form of the right hand side of this as our approximation $\tilde S$ to $S$.

    Changes to the artificial viscosity stabilization

    -

    Similarly to step-31, we will use an artificial viscosity for stabilization based on a residual of the equation. As a difference to step-31, we will provide two slightly different definitions of the stabilization parameter. For $\alpha=1$, we use the same definition as in step-31:

    -\begin{eqnarray*}
+<p>Similarly to <a class=step-31, we will use an artificial viscosity for stabilization based on a residual of the equation. As a difference to step-31, we will provide two slightly different definitions of the stabilization parameter. For $\alpha=1$, we use the same definition as in step-31:

    +\begin{eqnarray*}
   \nu_\alpha(T)|_K
   =
   \nu_1(T)|_K
@@ -325,76 +325,76 @@
     1,
     \frac{\|R_1(T)\|_{L^\infty(K)}}{c(\mathbf{u},T)}
   \right\}
-\end{eqnarray*} +\end{eqnarray*}" src="form_4168.png"/> /usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html 2024-03-17 21:57:45.599247614 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html 2024-03-17 21:57:45.611247689 +0000 @@ -166,16 +166,16 @@

    Introduction

    Euler flow

    The equations that describe the movement of a compressible, inviscid gas (the so-called Euler equations of gas dynamics) are a basic system of conservation laws. In spatial dimension $d$ they read

    -\[
+<picture><source srcset=\[
 \partial_t \mathbf{w} + \nabla \cdot \mathbf{F}(\mathbf{w}) =
 \mathbf{G}(\mathbf w),
-\] +\]" src="form_4265.png"/>

    -

    with the solution $\mathbf{w}=(\rho v_1,\ldots,\rho v_d,\rho,
-E)^{\top}$ consisting of $\rho$ the fluid density, ${\mathbf v}=(v_1,\ldots v_d)^T$ the flow velocity (and thus $\rho\mathbf v$ being the linear momentum density), and $E$ the energy density of the gas. We interpret the equations above as $\partial_t \mathbf{w}_i + \nabla \cdot \mathbf{F}_i(\mathbf{w}) = \mathbf
-G_i(\mathbf w)$, $i=1,\ldots,dim+2$.

    -

    For the Euler equations, the flux matrix $\mathbf F$ (or system of flux functions) is defined as (shown here for the case $d=3$)

    -\begin{eqnarray*}
+<p> with the solution <picture><source srcset=$\mathbf{w}=(\rho v_1,\ldots,\rho v_d,\rho,
+E)^{\top}$ consisting of $\rho$ the fluid density, ${\mathbf v}=(v_1,\ldots v_d)^T$ the flow velocity (and thus $\rho\mathbf v$ being the linear momentum density), and $E$ the energy density of the gas. We interpret the equations above as $\partial_t \mathbf{w}_i + \nabla \cdot \mathbf{F}_i(\mathbf{w}) = \mathbf
+G_i(\mathbf w)$, $i=1,\ldots,dim+2$.

    +

    For the Euler equations, the flux matrix $\mathbf F$ (or system of flux functions) is defined as (shown here for the case $d=3$)

    +\begin{eqnarray*}
   \mathbf F(\mathbf w)
   =
   \left(
@@ -187,10 +187,10 @@
     (E+p) v_1 & (E+p) v_2 & (E+p) v_3
   \end{array}
   \right),
-\end{eqnarray*} +\end{eqnarray*}" src="form_4272.png"/>

    and we will choose as particular right hand side forcing only the effects of gravity, described by

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \mathbf G(\mathbf w)
   =
   \left(
@@ -202,43 +202,43 @@
     \rho \mathbf g \cdot \mathbf v
   \end{array}
   \right),
-\end{eqnarray*} +\end{eqnarray*}" src="form_4273.png"/>

    -

    where $\mathbf g=(g_1,g_2,g_3)^T$ denotes the gravity vector. With this, the entire system of equations reads:

    -\begin{eqnarray*}
+<p> where <picture><source srcset=$\mathbf g=(g_1,g_2,g_3)^T$ denotes the gravity vector. With this, the entire system of equations reads:

    +\begin{eqnarray*}
   \partial_t (\rho v_i) + \sum_{s=1}^d \frac{\partial(\rho v_i v_s +
   \delta_{is} p)}{\partial x_s} &=& g_i \rho, \qquad i=1,\dots,d, \\
   \partial_t \rho + \sum_{s=1}^d \frac{\partial(\rho v_s)}{\partial x_s} &=& 0,  \\
   \partial_t E + \sum_{s=1}^d \frac{\partial((E+p)v_s)}{\partial x_s} &=&
   \rho \mathbf g \cdot \mathbf v.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4275.png"/>

    -

    These equations describe, respectively, the conservation of momentum, mass, and energy. The system is closed by a relation that defines the pressure: $p =
-(\gamma -1)(E-\frac{1}{2} \rho |\mathbf v|^2)$. For the constituents of air (mainly nitrogen and oxygen) and other diatomic gases, the ratio of specific heats is $\gamma=1.4$.

    +

    These equations describe, respectively, the conservation of momentum, mass, and energy. The system is closed by a relation that defines the pressure: $p =
+(\gamma -1)(E-\frac{1}{2} \rho |\mathbf v|^2)$. For the constituents of air (mainly nitrogen and oxygen) and other diatomic gases, the ratio of specific heats is $\gamma=1.4$.

    This problem obviously falls into the class of vector-valued problems. A general overview of how to deal with these problems in deal.II can be found in the Handling vector valued problems module.

    Discretization

    -

    Discretization happens in the usual way, taking into account that this is a hyperbolic problem in the same style as the simple one discussed in step-12: We choose a finite element space $V_h$, and integrate our conservation law against our (vector-valued) test function $\mathbf{z} \in V_h$. We then integrate by parts and approximate the boundary flux with a numerical flux $\mathbf{H}$,

    -\begin{eqnarray*}
+<p>Discretization happens in the usual way, taking into account that this is a hyperbolic problem in the same style as the simple one discussed in <a class=step-12: We choose a finite element space $V_h$, and integrate our conservation law against our (vector-valued) test function $\mathbf{z} \in V_h$. We then integrate by parts and approximate the boundary flux with a numerical flux $\mathbf{H}$,

    +\begin{eqnarray*}
 &&\int_{\Omega} (\partial_t \mathbf{w}, \mathbf{z}) + (\nabla \cdot \mathbf{F}(\mathbf{w}), \mathbf{z}) \\
 &\approx &\int_{\Omega} (\partial_t \mathbf{w}, \mathbf{z}) - (\mathbf{F}(\mathbf{w}), \nabla \mathbf{z}) + h^{\eta}(\nabla \mathbf{w} , \nabla \mathbf{z}) + \int_{\partial \Omega} (\mathbf{H}(\mathbf{w}^+, \mathbf{w}^-, \mathbf{n}), \mathbf{z}^+),
-\end{eqnarray*} +\end{eqnarray*}" src="form_4280.png"/>

    -

    where a superscript $+$ denotes the interior trace of a function, and $-$ represents the outer trace. The diffusion term $h^{\eta}(\nabla \mathbf{w} , \nabla \mathbf{z})$ is introduced strictly for stability, where $h$ is the mesh size and $\eta$ is a parameter prescribing how much diffusion to add.

    -

    On the boundary, we have to say what the outer trace $\mathbf{w}^-$ is. Depending on the boundary condition, we prescribe either of the following:

    The output are the derivatives $\frac{\partial c(a,b)}{\partial a},
+\frac{\partial c(a,b)}{\partial b}$ of $c(a,b)=2a+\cos(ab)$ at $a=1,b=2$.

    It should be noted that Sacado provides more auto-differentiation capabilities than the small subset used in this program. However, understanding the example above is enough to understand the use of Sacado in this Euler flow program.

    Trilinos solvers

    The program uses either the Aztec iterative solvers, or the Amesos sparse direct solver, both provided by the Trilinos package. This package is inherently designed to be used in a parallel program, however, it may be used in serial just as easily, as is done here. The Epetra package is the basic vector/matrix library upon which the solvers are built. This very powerful package can be used to describe the parallel distribution of a vector, and to define sparse matrices that operate on these vectors. Please view the commented code for more details on how these solvers are used within the example.

    @@ -325,8 +325,8 @@

    Implementation

    The implementation of this program is split into three essential parts:

    Transformations between variables

    -

    Next, we define the gas constant. We will set it to 1.4 in its definition immediately following the declaration of this class (unlike integer variables, like the ones above, static const floating point member variables cannot be initialized within the class declaration in C++). This value of 1.4 is representative of a gas that consists of molecules composed of two atoms, such as air which consists up to small traces almost entirely of $N_2$ and $O_2$.

    +

    Next, we define the gas constant. We will set it to 1.4 in its definition immediately following the declaration of this class (unlike integer variables, like the ones above, static const floating point member variables cannot be initialized within the class declaration in C++). This value of 1.4 is representative of a gas that consists of molecules composed of two atoms, such as air which consists up to small traces almost entirely of $N_2$ and $O_2$.

      static const double gas_gamma;
     
     
    -

    In the following, we will need to compute the kinetic energy and the pressure from a vector of conserved variables. This we can do based on the energy density and the kinetic energy $\frac 12 \rho |\mathbf v|^2
-   = \frac{|\rho \mathbf v|^2}{2\rho}$ (note that the independent variables contain the momentum components $\rho v_i$, not the velocities $v_i$).

    +

    In the following, we will need to compute the kinetic energy and the pressure from a vector of conserved variables. This we can do based on the energy density and the kinetic energy $\frac 12 \rho |\mathbf v|^2
+   = \frac{|\rho \mathbf v|^2}{2\rho}$ (note that the independent variables contain the momentum components $\rho v_i$, not the velocities $v_i$).

      template <typename InputVector>
      static typename InputVector::value_type
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html 2024-03-17 21:57:45.699248232 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html 2024-03-17 21:57:45.707248281 +0000 @@ -139,7 +139,7 @@

    Irrotational flow

    The incompressible motion of an inviscid fluid past a body (for example air past an airplane wing, or air or water past a propeller) is usually modeled by the Euler equations of fluid dynamics:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \frac{\partial }{\partial t}\mathbf{v} + (\mathbf{v}\cdot\nabla)\mathbf{v}
   &=
   -\frac{1}{\rho}\nabla p + \mathbf{g}
@@ -147,12 +147,12 @@
   \\
   \nabla \cdot \mathbf{v}&=0
   &\text{in } \mathbb{R}^n\backslash\Omega
-\end{align*} +\end{align*}" src="form_4379.png"/>

    -

    where the fluid density $\rho$ and the acceleration $\mathbf{g}$ due to external forces are given and the velocity $\mathbf{v}$ and the pressure $p$ are the unknowns. Here $\Omega$ is a closed bounded region representing the body around which the fluid moves.

    +

    where the fluid density $\rho$ and the acceleration $\mathbf{g}$ due to external forces are given and the velocity $\mathbf{v}$ and the pressure $p$ are the unknowns. Here $\Omega$ is a closed bounded region representing the body around which the fluid moves.

    The above equations can be derived from Navier-Stokes equations assuming that the effects due to viscosity are negligible compared to those due to the pressure gradient, inertial forces and the external forces. This is the opposite case of the Stokes equations discussed in step-22 which are the limit case of dominant viscosity, i.e. where the velocity is so small that inertia forces can be neglected. On the other hand, owing to the assumed incompressibility, the equations are not suited for very high speed gas flows where compressibility and the equation of state of the gas have to be taken into account, leading to the Euler equations of gas dynamics, a hyperbolic system.

    For the purpose of this tutorial program, we will consider only stationary flow without external forces:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   (\mathbf{v}\cdot\nabla)\mathbf{v}
   &=
   -\frac{1}{\rho}\nabla p
@@ -160,159 +160,159 @@
   \\
   \nabla \cdot \mathbf{v}&=0
   &\text{in } \mathbb{R}^n\backslash\Omega
-\end{align*} +\end{align*}" src="form_4380.png"/>

    Uniqueness of the solution of the Euler equations is ensured by adding the boundary conditions

    -\[
+<picture><source srcset=\[
   \label{eq:boundary-conditions}
   \begin{aligned}
     \mathbf{n}\cdot\mathbf{v}& = 0 \qquad && \text{ on } \partial\Omega \\
     \mathbf{v}& = \mathbf{v}_\infty && \text{ when } |\mathbf{x}| \to \infty,
   \end{aligned}
-\] +\]" src="form_4381.png"/>

    -

    which is to say that the body is at rest in our coordinate systems and is not permeable, and that the fluid has (constant) velocity $\mathbf{v}_\infty$ at infinity. An alternative viewpoint is that our coordinate system moves along with the body whereas the background fluid is at rest at infinity. Notice that we define the normal $\mathbf{n}$ as the outer normal to the domain $\Omega$, which is the opposite of the outer normal to the integration domain.

    +

    which is to say that the body is at rest in our coordinate systems and is not permeable, and that the fluid has (constant) velocity $\mathbf{v}_\infty$ at infinity. An alternative viewpoint is that our coordinate system moves along with the body whereas the background fluid is at rest at infinity. Notice that we define the normal $\mathbf{n}$ as the outer normal to the domain $\Omega$, which is the opposite of the outer normal to the integration domain.

    For both stationary and non stationary flow, the solution process starts by solving for the velocity in the second equation and substituting in the first equation in order to find the pressure. The solution of the stationary Euler equations is typically performed in order to understand the behavior of the given (possibly complex) geometry when a prescribed motion is enforced on the system.

    -

    The first step in this process is to change the frame of reference from a coordinate system moving along with the body to one in which the body moves through a fluid that is at rest at infinity. This can be expressed by introducing a new velocity $\mathbf{\tilde{v}}=\mathbf{v}-\mathbf{v}_\infty$ for which we find that the same equations hold (because $\nabla\cdot
-\mathbf{v}_\infty=0$) and we have boundary conditions

    -\[
+<p>The first step in this process is to change the frame of reference from a coordinate system moving along with the body to one in which the body moves through a fluid that is at rest at infinity. This can be expressed by introducing a new velocity <picture><source srcset=$\mathbf{\tilde{v}}=\mathbf{v}-\mathbf{v}_\infty$ for which we find that the same equations hold (because $\nabla\cdot
+\mathbf{v}_\infty=0$) and we have boundary conditions

    +\[
   \label{eq:boundary-conditions-tilde}
   \begin{aligned}
     \mathbf{n}\cdot\mathbf{\tilde{v}}& = -\mathbf{n}\cdot\mathbf{v}_\infty \qquad && \text{ on } \partial\Omega \\
     \mathbf{\tilde{v}}& = 0 && \text{ when } |\mathbf{x}| \to \infty,
   \end{aligned}
-\] +\]" src="form_4385.png"/>

    -

    If we assume that the fluid is irrotational, i.e., $\nabla \times
-\mathbf{v}=0$ in $\mathbb{R}^n\backslash\Omega$, we can represent the velocity, and consequently also the perturbation velocity, as the gradient of a scalar function:

    -\[
+<p>If we assume that the fluid is irrotational, i.e., <picture><source srcset=$\nabla \times
+\mathbf{v}=0$ in $\mathbb{R}^n\backslash\Omega$, we can represent the velocity, and consequently also the perturbation velocity, as the gradient of a scalar function:

    +\[
   \mathbf{\tilde{v}}=\nabla\phi,
-\] +\]" src="form_4388.png"/>

    and so the second part of Euler equations above can be rewritten as the homogeneous Laplace equation for the unknown $\phi$:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
 \label{laplace}
 \Delta\phi &= 0 \qquad &&\text{in}\ \mathbb{R}^n\backslash\Omega,
            \\
            \mathbf{n}\cdot\nabla\phi &= -\mathbf{n}\cdot\mathbf{v}_\infty
            && \text{on}\ \partial\Omega
-\end{align*} +\end{align*}" src="form_4389.png"/>

    -

    while the momentum equation reduces to Bernoulli's equation that expresses the pressure $p$ as a function of the potential $\phi$:

    -\[
+<p> while the momentum equation reduces to Bernoulli's equation that expresses the pressure <picture><source srcset=$p$ as a function of the potential $\phi$:

    +\[
 \frac{p}{\rho} +\frac{1}{2} | \nabla \phi |^2 = 0 \in \Omega.
-\] +\]" src="form_4390.png"/>

    So we can solve the problem by solving the Laplace equation for the potential. We recall that the following functions, called fundamental solutions of the Laplace equation,

    -\[ \begin{aligned}
+<picture><source srcset=\[ \begin{aligned}
 \label{eq:3} G(\mathbf{y}-\mathbf{x}) = &
 -\frac{1}{2\pi}\ln|\mathbf{y}-\mathbf{x}| \qquad && \text{for } n=2 \\
 G(\mathbf{y}-\mathbf{x}) = &
 \frac{1}{4\pi}\frac{1}{|\mathbf{y}-\mathbf{x}|}&& \text{for } n=3,
 \end{aligned}
-\] +\]" src="form_4391.png"/>

    satisfy in a distributional sense the equation:

    -\[
+<picture><source srcset=\[
 -\Delta_y G(\mathbf{y}-\mathbf{x}) = \delta(\mathbf{y}-\mathbf{x}),
-\] +\]" src="form_4392.png"/>

    -

    where the derivative is done in the variable $\mathbf{y}$. By using the usual Green identities, our problem can be written on the boundary $\partial\Omega = \Gamma$ only. We recall the general definition of the second Green identity:

    +

    where the derivative is done in the variable $\mathbf{y}$. By using the usual Green identities, our problem can be written on the boundary $\partial\Omega = \Gamma$ only. We recall the general definition of the second Green identity:

    -\[\label{green}
+<picture><source srcset=\[\label{green}
   \int_{\omega}
   (-\Delta u)v\,dx + \int_{\partial\omega} \frac{\partial u}{\partial \tilde{\mathbf{n}} }v \,ds
   =
   \int_{\omega}
   (-\Delta v)u\,dx + \int_{\partial\omega} u\frac{\partial v}{\partial \tilde{\mathbf{n}}} \,ds,
-\] +\]" src="form_4395.png"/>

    -

    where $\tilde{\mathbf{n}}$ is the normal to the surface of $\omega$ pointing outwards from the domain of integration $\omega$.

    -

    In our case the domain of integration is the domain $\mathbb{R}^n\backslash\Omega$, whose boundary is $ \Gamma_\infty \cup
-\Gamma$, where the "boundary" at infinity is defined as

    +

    where $\tilde{\mathbf{n}}$ is the normal to the surface of $\omega$ pointing outwards from the domain of integration $\omega$.

    +

    In our case the domain of integration is the domain $\mathbb{R}^n\backslash\Omega$, whose boundary is $ \Gamma_\infty \cup
+\Gamma$, where the "boundary" at infinity is defined as

    -\[
+<picture><source srcset=\[
 \Gamma_\infty \dealcoloneq \lim_{r\to\infty} \partial B_r(0).
-\] +\]" src="form_4398.png"/>

    -

    In our program the normals are defined as outer to the domain $\Omega$, that is, they are in fact inner to the integration domain, and some care is required in defining the various integrals with the correct signs for the normals, i.e. replacing $\tilde{\mathbf{n}}$ by $-\mathbf{n}$.

    -

    If we substitute $u$ and $v$ in the Green identity with the solution $\phi$ and with the fundamental solution of the Laplace equation respectively, as long as $\mathbf{x}$ is chosen in the region $\mathbb{R}^n\backslash\Omega$, we obtain:

    -\[
+<p>In our program the normals are defined as <em>outer</em> to the domain <picture><source srcset=$\Omega$, that is, they are in fact inner to the integration domain, and some care is required in defining the various integrals with the correct signs for the normals, i.e. replacing $\tilde{\mathbf{n}}$ by $-\mathbf{n}$.

    +

    If we substitute $u$ and $v$ in the Green identity with the solution $\phi$ and with the fundamental solution of the Laplace equation respectively, as long as $\mathbf{x}$ is chosen in the region $\mathbb{R}^n\backslash\Omega$, we obtain:

    +\[
   \phi(\mathbf{x}) -
   \int_{\Gamma\cup\Gamma_\infty}\frac{\partial G(\mathbf{y}-\mathbf{x})}{\partial \mathbf{n}_y}\phi(\mathbf{y})\,ds_y
   =
   -\int_{\Gamma\cup\Gamma_\infty}G(\mathbf{y}-\mathbf{x})\frac{\partial \phi}{\partial \mathbf{n}_y}(\mathbf{y})\,ds_y
   \qquad \forall\mathbf{x}\in \mathbb{R}^n\backslash\Omega
-\] +\]" src="form_4400.png"/>

    where the normals are now pointing inward the domain of integration.

    -

    Notice that in the above equation, we also have the integrals on the portion of the boundary at $\Gamma_\infty$. Using the boundary conditions of our problem, we have that $\nabla \phi$ is zero at infinity (which simplifies the integral on $\Gamma_\infty$ on the right hand side).

    -

    The integral on $\Gamma_\infty$ that appears on the left hand side can be treated by observing that $\nabla\phi=0$ implies that $\phi$ at infinity is necessarily constant. We define its value to be $\phi_\infty$. It is an easy exercise to prove that

    +

    Notice that in the above equation, we also have the integrals on the portion of the boundary at $\Gamma_\infty$. Using the boundary conditions of our problem, we have that $\nabla \phi$ is zero at infinity (which simplifies the integral on $\Gamma_\infty$ on the right hand side).

    +

    The integral on $\Gamma_\infty$ that appears on the left hand side can be treated by observing that $\nabla\phi=0$ implies that $\phi$ at infinity is necessarily constant. We define its value to be $\phi_\infty$. It is an easy exercise to prove that

    -\[
+<picture><source srcset=\[
 -\int_{\Gamma_\infty} \frac{\partial G(\mathbf{y}-\mathbf{x})}
 {\partial \mathbf{n}_y}\phi_\infty \,ds_y =
 \lim_{r\to\infty} \int_{\partial B_r(0)} \frac{\mathbf{r}}{r} \cdot \nabla G(\mathbf{y}-\mathbf{x})
 \phi_\infty \,ds_y = -\phi_\infty.
-\] +\]" src="form_4405.png"/>

    Using this result, we can reduce the above equation only on the boundary $\Gamma$ using the so-called Single and Double Layer Potential operators:

    -\[\label{integral}
+<picture><source srcset=\[\label{integral}
   \phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty
   -\left(S \frac{\partial \phi}{\partial n_y}\right)(\mathbf{x})
   \qquad \forall\mathbf{x}\in \mathbb{R}^n\backslash\Omega.
-\] +\]" src="form_4406.png"/>

    -

    (The name of these operators comes from the fact that they describe the electric potential in $\mathbb{R}^n$ due to a single thin sheet of charges along a surface, and due to a double sheet of charges and anti-charges along the surface, respectively.)

    -

    In our case, we know the Neumann values of $\phi$ on the boundary: $\mathbf{n}\cdot\nabla\phi = -\mathbf{n}\cdot\mathbf{v}_\infty$. Consequently,

    -\[
+<p>(The name of these operators comes from the fact that they describe the electric potential in <picture><source srcset=$\mathbb{R}^n$ due to a single thin sheet of charges along a surface, and due to a double sheet of charges and anti-charges along the surface, respectively.)

    +

    In our case, we know the Neumann values of $\phi$ on the boundary: $\mathbf{n}\cdot\nabla\phi = -\mathbf{n}\cdot\mathbf{v}_\infty$. Consequently,

    +\[
   \phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty +
    \left(S[\mathbf{n}\cdot\mathbf{v}_\infty]\right)(\mathbf{x})
    \qquad \forall\mathbf{x} \in \mathbb{R}^n\backslash\Omega.
-\] +\]" src="form_4408.png"/>

    -

    If we take the limit for $\mathbf{x}$ tending to $\Gamma$ of the above equation, using well known properties of the single and double layer operators, we obtain an equation for $\phi$ just on the boundary $\Gamma$ of $\Omega$:

    +

    If we take the limit for $\mathbf{x}$ tending to $\Gamma$ of the above equation, using well known properties of the single and double layer operators, we obtain an equation for $\phi$ just on the boundary $\Gamma$ of $\Omega$:

    -\[\label{SD}
+<picture><source srcset=\[\label{SD}
   \alpha(\mathbf{x})\phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty +
   \left(S [\mathbf{n}\cdot\mathbf{v}_\infty]\right)(\mathbf{x})
   \quad \mathbf{x}\in \partial\Omega,
-\] +\]" src="form_4409.png"/>

    -

    which is the Boundary Integral Equation (BIE) we were looking for, where the quantity $\alpha(\mathbf{x})$ is the fraction of angle or solid angle by which the point $\mathbf{x}$ sees the domain of integration $\mathbb{R}^n\backslash\Omega$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html 2024-03-17 21:57:45.795248825 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html 2024-03-17 21:57:45.799248850 +0000 @@ -139,57 +139,57 @@

    Introduction

    Motivation

    The purpose of this program is to show how to effectively solve the incompressible time-dependent Navier-Stokes equations. These equations describe the flow of a viscous incompressible fluid and read

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   u_t + u \cdot \nabla u - \nu \Delta u + \nabla p = f, \\
   \nabla \cdot u = 0,
-\end{align*} +\end{align*}" src="form_4489.png"/>

    -

    where $u$ represents the velocity of the flow and $p$ the pressure. This system of equations is supplemented by the initial condition

    -\[
+<p> where <picture><source srcset=$u$ represents the velocity of the flow and $p$ the pressure. This system of equations is supplemented by the initial condition

    +\[
   u |_{t=0} = u_0,
-\] +\]" src="form_4490.png"/>

    -

    with $u_0$ sufficiently smooth and solenoidal, and suitable boundary conditions. For instance, an admissible boundary condition, is

    -\[
+<p> with <picture><source srcset=$u_0$ sufficiently smooth and solenoidal, and suitable boundary conditions. For instance, an admissible boundary condition, is

    +\[
   u|_{\partial\Omega} = u_b.
-\] +\]" src="form_4491.png"/>

    -

    It is possible to prescribe other boundary conditions as well. In the test case that we solve here the boundary is partitioned into two disjoint subsets $\partial\Omega = \Gamma_1 \cup \Gamma_2$ and we have

    -\[
+<p> It is possible to prescribe other boundary conditions as well. In the test case that we solve here the boundary is partitioned into two disjoint subsets <picture><source srcset=$\partial\Omega = \Gamma_1 \cup \Gamma_2$ and we have

    +\[
   u|_{\Gamma_1} = u_b,
-\] +\]" src="form_4493.png"/>

    and

    -\[
+<picture><source srcset=\[
  u\times n|_{\Gamma_2} = 0, \quad p|_{\Gamma_2} = 0
-\] +\]" src="form_4494.png"/>

    -

    where $n$ is the outer unit normal. The boundary conditions on $\Gamma_2$ are often used to model outflow conditions.

    +

    where $n$ is the outer unit normal. The boundary conditions on $\Gamma_2$ are often used to model outflow conditions.

    In previous tutorial programs (see for instance step-20 and step-22) we have seen how to solve the time-independent Stokes equations using a Schur complement approach. For the time-dependent case, after time discretization, we would arrive at a system like

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \frac1\tau u^k - \nu \Delta u^k + \nabla p^k = F^k, \\
   \nabla \cdot u^k = 0,
-\end{align*} +\end{align*}" src="form_4495.png"/>

    -

    where $\tau$ is the time-step. Although the structure of this system is similar to the Stokes system and thus it could be solved using a Schur complement approach, it turns out that the condition number of the Schur complement is proportional to $\tau^{-2}$. This makes the system very difficult to solve, and means that for the Navier-Stokes equations, this is not a useful avenue to the solution.

    +

    where $\tau$ is the time-step. Although the structure of this system is similar to the Stokes system and thus it could be solved using a Schur complement approach, it turns out that the condition number of the Schur complement is proportional to $\tau^{-2}$. This makes the system very difficult to solve, and means that for the Navier-Stokes equations, this is not a useful avenue to the solution.

    Projection methods

    Rather, we need to come up with a different approach to solve the time-dependent Navier-Stokes equations. The difficulty in their solution comes from the fact that the velocity and the pressure are coupled through the constraint

    -\[
+<picture><source srcset=\[
   \nabla \cdot u = 0,
-\] +\]" src="form_4497.png"/>

    for which the pressure is the Lagrange multiplier. Projection methods aim at decoupling this constraint from the diffusion (Laplace) operator.

    -

    Let us shortly describe how the projection methods look like in a semi-discrete setting. The objective is to obtain a sequence of velocities $\{u^k\}$ and pressures $\{p^k\}$. We will also obtain a sequence $\{\phi^k\}$ of auxiliary variables. Suppose that from the initial conditions, and an application of a first order method we have found $(u^0,p^0,\phi^0=0)$ and $(u^1,p^1,\phi^1=p^1-p^0)$. Then the projection method consists of the following steps:

    Finally, we will have an object that contains "constraints" on our degrees of freedom. This could include hanging node constraints if we had adaptively refined meshes (which we don't have in the current program). Here, we will store the constraints for boundary nodes $U_i=0$.

      AffineConstraints<double> constraints;
      };
     
    @@ -346,7 +346,7 @@
    Definition patterns.h:1022
    Definition patterns.h:189

    EigenvalueProblem::make_grid_and_dofs

    -

    The next function creates a mesh on the domain $[-1,1]^d$, refines it as many times as the input file calls for, and then attaches a DoFHandler to it and initializes the matrices and vectors to their correct sizes. We also build the constraints that correspond to the boundary values $u|_{\partial\Omega}=0$.

    +

    The next function creates a mesh on the domain $[-1,1]^d$, refines it as many times as the input file calls for, and then attaches a DoFHandler to it and initializes the matrices and vectors to their correct sizes. We also build the constraints that correspond to the boundary values $u|_{\partial\Omega}=0$.

    For the matrices, we use the PETSc wrappers. These have the ability to allocate memory as necessary as non-zero entries are added. This seems inefficient: we could as well first compute the sparsity pattern, initialize the matrices with it, and as we then insert entries we can be sure that we do not need to re-allocate memory and free the one used previously. One way to do that would be to use code like this:

    dsp (dof_handler.n_dofs(),
    dof_handler.n_dofs());
    @@ -392,10 +392,10 @@
     

    EigenvalueProblem::assemble_system

    -

    Here, we assemble the global stiffness and mass matrices from local contributions $A^K_{ij} = \int_K \nabla\varphi_i(\mathbf x) \cdot
+<p>Here, we assemble the global stiffness and mass matrices from local contributions <picture><source srcset=$A^K_{ij} = \int_K \nabla\varphi_i(\mathbf x) \cdot
    \nabla\varphi_j(\mathbf x) + V(\mathbf x)\varphi_i(\mathbf
-   x)\varphi_j(\mathbf x)$ and $M^K_{ij} = \int_K \varphi_i(\mathbf
-   x)\varphi_j(\mathbf x)$ respectively. This function should be immediately familiar if you've seen previous tutorial programs. The only thing new would be setting up an object that described the potential $V(\mathbf x)$ using the expression that we got from the input file. We then need to evaluate this object at the quadrature points on each cell. If you've seen how to evaluate function objects (see, for example the coefficient in step-5), the code here will also look rather familiar.

    + x)\varphi_j(\mathbf x)$" src="form_4569.png"/> and $M^K_{ij} = \int_K \varphi_i(\mathbf
+   x)\varphi_j(\mathbf x)$ respectively. This function should be immediately familiar if you've seen previous tutorial programs. The only thing new would be setting up an object that described the potential $V(\mathbf x)$ using the expression that we got from the input file. We then need to evaluate this object at the quadrature points on each cell. If you've seen how to evaluate function objects (see, for example the coefficient in step-5), the code here will also look rather familiar.

      template <int dim>
      void EigenvalueProblem<dim>::assemble_system()
      {
    @@ -519,9 +519,9 @@
      eigenfunctions,
      eigenfunctions.size());
     
    -

    The output of the call above is a set of vectors and values. In eigenvalue problems, the eigenfunctions are only determined up to a constant that can be fixed pretty arbitrarily. Knowing nothing about the origin of the eigenvalue problem, SLEPc has no other choice than to normalize the eigenvectors to one in the $l_2$ (vector) norm. Unfortunately this norm has little to do with any norm we may be interested from a eigenfunction perspective: the $L_2(\Omega)$ norm, or maybe the $L_\infty(\Omega)$ norm.

    -

    Let us choose the latter and rescale eigenfunctions so that they have $\|\phi_i(\mathbf x)\|_{L^\infty(\Omega)}=1$ instead of $\|\Phi\|_{l_2}=1$ (where $\phi_i$ is the $i$th eigenfunction and $\Phi_i$ the corresponding vector of nodal values). For the $Q_1$ elements chosen here, we know that the maximum of the function $\phi_i(\mathbf x)$ is attained at one of the nodes, so $\max_{\mathbf
-   x}\phi_i(\mathbf x)=\max_j (\Phi_i)_j$, making the normalization in the $L_\infty$ norm trivial. Note that this doesn't work as easily if we had chosen $Q_k$ elements with $k>1$: there, the maximum of a function does not necessarily have to be attained at a node, and so $\max_{\mathbf x}\phi_i(\mathbf x)\ge\max_j (\Phi_i)_j$ (although the equality is usually nearly true).

    +

    The output of the call above is a set of vectors and values. In eigenvalue problems, the eigenfunctions are only determined up to a constant that can be fixed pretty arbitrarily. Knowing nothing about the origin of the eigenvalue problem, SLEPc has no other choice than to normalize the eigenvectors to one in the $l_2$ (vector) norm. Unfortunately this norm has little to do with any norm we may be interested from a eigenfunction perspective: the $L_2(\Omega)$ norm, or maybe the $L_\infty(\Omega)$ norm.

    +

    Let us choose the latter and rescale eigenfunctions so that they have $\|\phi_i(\mathbf x)\|_{L^\infty(\Omega)}=1$ instead of $\|\Phi\|_{l_2}=1$ (where $\phi_i$ is the $i$th eigenfunction and $\Phi_i$ the corresponding vector of nodal values). For the $Q_1$ elements chosen here, we know that the maximum of the function $\phi_i(\mathbf x)$ is attained at one of the nodes, so $\max_{\mathbf
+   x}\phi_i(\mathbf x)=\max_j (\Phi_i)_j$, making the normalization in the $L_\infty$ norm trivial. Note that this doesn't work as easily if we had chosen $Q_k$ elements with $k>1$: there, the maximum of a function does not necessarily have to be attained at a node, and so $\max_{\mathbf x}\phi_i(\mathbf x)\ge\max_j (\Phi_i)_j$ (although the equality is usually nearly true).

      for (auto &eigenfunction : eigenfunctions)
      eigenfunction /= eigenfunction.linfty_norm();
     
    @@ -549,7 +549,7 @@
    void attach_dof_handler(const DoFHandler< dim, spacedim > &)
    Definition data_out.h:148
    std::string int_to_string(const unsigned int value, const unsigned int digits=numbers::invalid_unsigned_int)
    Definition utilities.cc:471
    -

    The only thing worth discussing may be that because the potential is specified as a function expression in the input file, it would be nice to also have it as a graphical representation along with the eigenfunctions. The process to achieve this is relatively straightforward: we build an object that represents $V(\mathbf x)$ and then we interpolate this continuous function onto the finite element space. The result we also attach to the DataOut object for visualization.

    +

    The only thing worth discussing may be that because the potential is specified as a function expression in the input file, it would be nice to also have it as a graphical representation along with the eigenfunctions. The process to achieve this is relatively straightforward: we build an object that represents $V(\mathbf x)$ and then we interpolate this continuous function onto the finite element space. The result we also attach to the DataOut object for visualization.

      Vector<double> projected_potential(dof_handler.n_dofs());
      {
      FunctionParser<dim> potential;
    @@ -658,7 +658,7 @@
    set Global mesh refinement steps = 5
    set Number of eigenvalues/eigenfunctions = 5
    set Potential = 0
    -

    Here, the potential is zero inside the domain, and we know that the eigenvalues are given by $\lambda_{(mn)}=\frac{\pi^2}{4}(m^2+n^2)$ where $m,n\in{\mathbb N^+}$. Eigenfunctions are sines and cosines with $m$ and $n$ periods in $x$ and $y$ directions. This matches the output our program generates:

    examples/step-36> make run
    +

    Here, the potential is zero inside the domain, and we know that the eigenvalues are given by $\lambda_{(mn)}=\frac{\pi^2}{4}(m^2+n^2)$ where $m,n\in{\mathbb N^+}$. Eigenfunctions are sines and cosines with $m$ and $n$ periods in $x$ and $y$ directions. This matches the output our program generates:

    examples/step-36> make run
    ============================ Running step-36
    Number of active cells: 1024
    Number of degrees of freedom: 1089
    @@ -671,7 +671,7 @@
    Eigenvalue 4 : 24.837
    Job done.
    -

    These eigenvalues are exactly the ones that correspond to pairs $(m,n)=(1,1)$, $(1,2)$ and $(2,1)$, $(2,2)$, and $(3,1)$. A visualization of the corresponding eigenfunctions would look like this:

    +

    These eigenvalues are exactly the ones that correspond to pairs $(m,n)=(1,1)$, $(1,2)$ and $(2,1)$, $(2,2)$, and $(3,1)$. A visualization of the corresponding eigenfunctions would look like this:

    @@ -683,7 +683,7 @@

    In this program, we use the interior penalty method and Nitsche's weak boundary conditions to solve Poisson's equation. We use multigrid methods on locally refined meshes, which are generated using a bulk criterion and a standard error estimator based on cell and face residuals. All operators are implemented using the MeshWorker interface.

    -

    Like in step-12, the discretization relies on finite element spaces, which are polynomial inside the mesh cells $K\in \mathbb T_h$, but have no continuity between cells. Since such functions have two values on each interior face $F\in \mathbb F_h^i$, one from each side, we define mean value and jump operators as follows: let K1 and K2 be the two cells sharing a face, and let the traces of functions ui and the outer normal vectors ni be labeled accordingly. Then, on the face, we let

    -\[
+<p>Like in <a class=step-12, the discretization relies on finite element spaces, which are polynomial inside the mesh cells $K\in \mathbb T_h$, but have no continuity between cells. Since such functions have two values on each interior face $F\in \mathbb F_h^i$, one from each side, we define mean value and jump operators as follows: let K1 and K2 be the two cells sharing a face, and let the traces of functions ui and the outer normal vectors ni be labeled accordingly. Then, on the face, we let

    +\[
         \average{ u } = \frac{u_1 + u_2}2
-\] +\]" src="form_4692.png"/>

    Note, that if such an expression contains a normal vector, the averaging operator turns into a jump. The interior penalty method for the problem

    -\[
+<picture><source srcset=\[
   -\Delta u = f \text{ in }\Omega \qquad u = u^D \text{ on } \partial\Omega
-\] +\]" src="form_4693.png"/>

    becomes

    -\begin{multline*}
+<picture><source srcset=\begin{multline*}
   \sum_{K\in \mathbb T_h} (\nabla u, \nabla v)_K
   \\
   + \sum_{F \in F_h^i} \biggl\{4\sigma_F (\average{ u \mathbf n}, \average{ v \mathbf n })_F
@@ -141,17 +141,17 @@
   = (f, v)_\Omega + \sum_{F \in F_h^b} \biggl\{
   2\sigma_F (u^D, v)_F - (\partial_n v,u^D)_F
   \biggr\}.
-\end{multline*} +\end{multline*}" src="form_4694.png"/>

    -

    Here, $\sigma_F$ is the penalty parameter, which is chosen as follows: for a face F of a cell K, compute the value

    -\[
+<p>Here, <picture><source srcset=$\sigma_F$ is the penalty parameter, which is chosen as follows: for a face F of a cell K, compute the value

    +\[
 \sigma_{F,K} = p(p+1) \frac{|F|_{d-1}}{|K|_d},
-\] +\]" src="form_4696.png"/>

    -

    where p is the polynomial degree of the finite element functions and $|\cdot|_d$ and $|\cdot|_{d-1}$ denote the $d$ and $d-1$ dimensional Hausdorff measure of the corresponding object. If the face is at the boundary, choose $\sigma_F = \sigma_{F,K}$. For an interior face, we take the average of the two values at this face.

    +

    where p is the polynomial degree of the finite element functions and $|\cdot|_d$ and $|\cdot|_{d-1}$ denote the $d$ and $d-1$ dimensional Hausdorff measure of the corresponding object. If the face is at the boundary, choose $\sigma_F = \sigma_{F,K}$. For an interior face, we take the average of the two values at this face.

    In our finite element program, we distinguish three different integrals, corresponding to the sums over cells, interior faces and boundary faces above. Since the MeshWorker::loop organizes the sums for us, we only need to implement the integrals over each mesh element. The class MatrixIntegrator below has these three functions for the left hand side of the formula, the class RHSIntegrator for the right.

    As we will see below, even the error estimate is of the same structure, since it can be written as

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \eta^2 &= \eta_K^2 + \eta_F^2 + \eta_B^2
   \\
   \eta_K^2 &= \sum_{K\in \mathbb T_h} h^2 \|f + \Delta u_h\|^2
@@ -160,7 +160,7 @@
     4 \sigma_F \| \average{u_h\mathbf n} \|^2 + h \|\average{\partial_n u_h}\|^2 \biggr\}
   \\
   \eta_B^2 &= \sum_{F \in F_h^b} 2\sigma_F \| u_h-u^D \|^2.
-\end{align*} +\end{align*}" src="form_4701.png"/>

    Thus, the functions for assembling matrices, right hand side and error estimates below exhibit that these loops are all generic and can be programmed in the same way.

    This program is related to step-12b, in that it uses MeshWorker and discontinuous Galerkin methods. While there, we solved an advection problem, here it is a diffusion problem. Here, we also use multigrid preconditioning and a theoretically justified error estimator, see Karakashian and Pascal (2003). The multilevel scheme was discussed in detail in Kanschat (2004). The adaptive iteration and its convergence have been discussed (for triangular meshes) in Hoppe, Kanschat, and Warburton (2009).

    @@ -450,10 +450,10 @@
    FiniteElementData::tensor_degree
    unsigned int tensor_degree() const

    Finally we have an integrator for the error. Since the energy norm for discontinuous Galerkin problems not only involves the difference of the gradient inside the cells, but also the jump terms across faces and at the boundary, we cannot just use VectorTools::integrate_difference(). Instead, we use the MeshWorker interface to compute the error ourselves.

    There are several different ways to define this energy norm, but all of them are equivalent to each other uniformly with mesh size (some not uniformly with polynomial degree). Here, we choose

    -\[ \|u\|_{1,h} =
+<picture><source srcset=\[ \|u\|_{1,h} =
    \sum_{K\in \mathbb T_h} \|\nabla u\|_K^2 + \sum_{F \in F_h^i}
    4\sigma_F\|\average{ u \mathbf n}\|^2_F + \sum_{F \in F_h^b}
-   2\sigma_F\|u\|^2_F \] + 2\sigma_F\|u\|^2_F \]" src="form_4702.png"/>

      template <int dim>
      class ErrorIntegrator : public MeshWorker::LocalIntegrator<dim>
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html 2024-03-17 21:57:46.159251073 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html 2024-03-17 21:57:46.167251123 +0000 @@ -253,7 +253,7 @@ but I don't actually know any such function with this name and these arguments."

    But back to the concrete case here: For this tutorial, we choose as right hand side the function $4(x^4+y^4)$ in 2d, or $4(x^4+y^4+z^4)$ in 3d. We could write this distinction using an if-statement on the space dimension, but here is a simple way that also allows us to use the same function in 1d (or in 4D, if you should desire to do so), by using a short loop. Fortunately, the compiler knows the size of the loop at compile time (remember that at the time when you define the template, the compiler doesn't know the value of dim, but when it later encounters a statement or declaration RightHandSide<2>, it will take the template, replace all occurrences of dim by 2 and compile the resulting function). In other words, at the time of compiling this function, the number of times the body will be executed is known, and the compiler can minimize the overhead needed for the loop; the result will be as fast as if we had used the formulas above right away.

    -

    The last thing to note is that a Point<dim> denotes a point in dim-dimensional space, and its individual components (i.e. $x$, $y$, ... coordinates) can be accessed using the () operator (in fact, the [] operator will work just as well) with indices starting at zero as usual in C and C++.

    +

    The last thing to note is that a Point<dim> denotes a point in dim-dimensional space, and its individual components (i.e. $x$, $y$, ... coordinates) can be accessed using the () operator (in fact, the [] operator will work just as well) with indices starting at zero as usual in C and C++.

      template <int dim>
      double RightHandSide<dim>::value(const Point<dim> &p,
      const unsigned int /*component*/) const
    @@ -267,7 +267,7 @@
     
     
    ::VectorizedArray< Number, width > pow(const ::VectorizedArray< Number, width > &, const Number p)
    -

    As boundary values, we choose $x^2+y^2$ in 2d, and $x^2+y^2+z^2$ in 3d. This happens to be equal to the square of the vector from the origin to the point at which we would like to evaluate the function, irrespective of the dimension. So that is what we return:

    +

    As boundary values, we choose $x^2+y^2$ in 2d, and $x^2+y^2+z^2$ in 3d. This happens to be equal to the square of the vector from the origin to the point at which we would like to evaluate the function, irrespective of the dimension. So that is what we return:

      template <int dim>
      double BoundaryValues<dim>::value(const Point<dim> &p,
      const unsigned int /*component*/) const
    @@ -290,8 +290,8 @@
     
     

    Step4::make_grid

    -

    Grid creation is something inherently dimension dependent. However, as long as the domains are sufficiently similar in 2d or 3d, the library can abstract for you. In our case, we would like to again solve on the square $[-1,1]\times [-1,1]$ in 2d, or on the cube $[-1,1] \times [-1,1] \times
-   [-1,1]$ in 3d; both can be termed GridGenerator::hyper_cube(), so we may use the same function in whatever dimension we are. Of course, the functions that create a hypercube in two and three dimensions are very much different, but that is something you need not care about. Let the library handle the difficult things.

    +

    Grid creation is something inherently dimension dependent. However, as long as the domains are sufficiently similar in 2d or 3d, the library can abstract for you. In our case, we would like to again solve on the square $[-1,1]\times [-1,1]$ in 2d, or on the cube $[-1,1] \times [-1,1] \times
+   [-1,1]$ in 3d; both can be termed GridGenerator::hyper_cube(), so we may use the same function in whatever dimension we are. Of course, the functions that create a hypercube in two and three dimensions are very much different, but that is something you need not care about. Let the library handle the difficult things.

      template <int dim>
      void Step4<dim>::make_grid()
      {
    @@ -383,7 +383,7 @@
      fe_values.JxW(q_index)); // dx
      }
    -

    As a final remark to these loops: when we assemble the local contributions into cell_matrix(i,j), we have to multiply the gradients of shape functions $i$ and $j$ at point number q_index and multiply it with the scalar weights JxW. This is what actually happens: fe_values.shape_grad(i,q_index) returns a dim dimensional vector, represented by a Tensor<1,dim> object, and the operator* that multiplies it with the result of fe_values.shape_grad(j,q_index) makes sure that the dim components of the two vectors are properly contracted, and the result is a scalar floating point number that then is multiplied with the weights. Internally, this operator* makes sure that this happens correctly for all dim components of the vectors, whether dim be 2, 3, or any other space dimension; from a user's perspective, this is not something worth bothering with, however, making things a lot simpler if one wants to write code dimension independently.

    +

    As a final remark to these loops: when we assemble the local contributions into cell_matrix(i,j), we have to multiply the gradients of shape functions $i$ and $j$ at point number q_index and multiply it with the scalar weights JxW. This is what actually happens: fe_values.shape_grad(i,q_index) returns a dim dimensional vector, represented by a Tensor<1,dim> object, and the operator* that multiplies it with the result of fe_values.shape_grad(j,q_index) makes sure that the dim components of the two vectors are properly contracted, and the result is a scalar floating point number that then is multiplied with the weights. Internally, this operator* makes sure that this happens correctly for all dim components of the vectors, whether dim be 2, 3, or any other space dimension; from a user's perspective, this is not something worth bothering with, however, making things a lot simpler if one wants to write code dimension independently.

    With the local systems assembled, the transfer into the global matrix and right hand side is done exactly as before, but here we have again merged some loops for efficiency:

      cell->get_dof_indices(local_dof_indices);
      for (const unsigned int i : fe_values.dof_indices())
    @@ -508,7 +508,7 @@ -
    Note
    A final remark on visualization: the idea of visualization is to give insight, which is not the same as displaying information. In particular, it is easy to overload a picture with information, but while it shows more information it makes it also more difficult to glean insight. As an example, the program I used to generate these pictures, VisIt, by default puts tick marks on every axis, puts a big fat label "X Axis" on the $x$ axis and similar for the other axes, shows the file name from which the data was taken in the top left and the name of the user doing so and the time and date on the bottom right. None of this is important here: the axes are equally easy to make out because the tripod at the bottom left is still visible, and we know from the program that the domain is $[-1,1]^3$, so there is no need for tick marks. As a consequence, I have switched off all the extraneous stuff in the picture: the art of visualization is to reduce the picture to those parts that are important to see what one wants to see, but no more.
    +
    Note
    A final remark on visualization: the idea of visualization is to give insight, which is not the same as displaying information. In particular, it is easy to overload a picture with information, but while it shows more information it makes it also more difficult to glean insight. As an example, the program I used to generate these pictures, VisIt, by default puts tick marks on every axis, puts a big fat label "X Axis" on the $x$ axis and similar for the other axes, shows the file name from which the data was taken in the top left and the name of the user doing so and the time and date on the bottom right. None of this is important here: the axes are equally easy to make out because the tripod at the bottom left is still visible, and we know from the program that the domain is $[-1,1]^3$, so there is no need for tick marks. As a consequence, I have switched off all the extraneous stuff in the picture: the art of visualization is to reduce the picture to those parts that are important to see what one wants to see, but no more.

    Postprocessing: What to do with the solution?

    This tutorial – like most of the other programs – principally only shows how to numerically approximate the solution of a partial differential equation, and then how to visualize this solution graphically. But solving a PDE is of course not the goal in most practical applications (unless you are a numerical methods developer and the method is the goal): We generally want to solve a PDE because we want to extract information from it. Examples for what people are interested in from solutions include the following:

    The point here is that from an engineering perspective, solving the PDE is only the first step. The second step is to evaluate the computed solution in order to extract relevant numbers that allow us to either optimize a design, or to make decisions. This second step is often called "postprocessing the solution".

    This program does not solve a solid or fluid mechanics problem, so we should try to illustrate postprocessing with something that makes sense in the context of the equation we solve here. The Poisson equation in two space dimensions is a model for the vertical deformation of a membrane that is clamped at the boundary and is subject to a vertical force. For this kind of situation, it makes sense to evaluate the average vertical displacement,

    -\[
+<picture><source srcset=\[
   \bar u_h = \frac{\int_\Omega u_h(\mathbf x) \, dx}{|\Omega|},
-\] +\]" src="form_4710.png"/>

    -

    where $|\Omega| = \int_\Omega 1 \, dx$ is the area of the domain. To compute $\bar u_h$, as usual we replace integrals over the domain by a sum of integrals over cells,

    -\[
+<p> where <picture><source srcset=$|\Omega| = \int_\Omega 1 \, dx$ is the area of the domain. To compute $\bar u_h$, as usual we replace integrals over the domain by a sum of integrals over cells,

    +\[
   \int_\Omega u_h(\mathbf x) \, dx
   =
   \sum_K \int_K u_h(\mathbf x) \, dx,
-\] +\]" src="form_4713.png"/>

    and then integrals over cells are approximated by quadrature:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \int_\Omega u_h(\mathbf x) \, dx
   &=
   \sum_K \int_K u_h(\mathbf x) \, dx,
   \\
   &=
   \sum_K \sum_q u_h(\mathbf x_q^K) w_q^K,
-\end{align*} +\end{align*}" src="form_4714.png"/>

    -

    where $w_q^K$ is the weight of the $q$th quadrature point evaluated on cell $K$. All of this is as always provided by the FEValues class – the entry point for all integrals in deal.II.

    -

    The actual implementation of this is straightforward once you know how to get the values of the solution $u$ at the quadrature points of a cell. This functionality is provided by FEValues::get_function_values(), a function that takes a global vector of nodal values as input and returns a vector of function values at the quadrature points of the current cell. Using this function, to see how it all works together you can place the following code snippet anywhere in the program after the solution has been computed (the output_results() function seems like a good place to also do postprocessing, for example):

    QGauss<dim> quadrature_formula(fe.degree + 1);
    +

    where $w_q^K$ is the weight of the $q$th quadrature point evaluated on cell $K$. All of this is as always provided by the FEValues class – the entry point for all integrals in deal.II.

    +

    The actual implementation of this is straightforward once you know how to get the values of the solution $u$ at the quadrature points of a cell. This functionality is provided by FEValues::get_function_values(), a function that takes a global vector of nodal values as input and returns a vector of function values at the quadrature points of the current cell. Using this function, to see how it all works together you can place the following code snippet anywhere in the program after the solution has been computed (the output_results() function seems like a good place to also do postprocessing, for example):

    QGauss<dim> quadrature_formula(fe.degree + 1);
    FEValues<dim> fe_values(fe,
    quadrature_formula,
    @@ -561,14 +561,14 @@
    }
    std::cout << " Mean value of u=" << integral_of_u / volume_of_omega
    << std::endl;
    -

    In this code snippet, we also compute the volume (or, since we are currently thinking about a two-dimensional situation: the area) $|\Omega|$ by computing the integral $|\Omega| = \int_\Omega 1 \, dx$ in exactly the same way, via quadrature. (We could avoid having to compute $|\Omega|$ by hand here, using the fact that deal.II has a function for this: GridTools::volume(). That said, it is efficient to compute the two integrals concurrently in the same loop, and so that's what we do.)

    +

    In this code snippet, we also compute the volume (or, since we are currently thinking about a two-dimensional situation: the area) $|\Omega|$ by computing the integral $|\Omega| = \int_\Omega 1 \, dx$ in exactly the same way, via quadrature. (We could avoid having to compute $|\Omega|$ by hand here, using the fact that deal.II has a function for this: GridTools::volume(). That said, it is efficient to compute the two integrals concurrently in the same loop, and so that's what we do.)

    This program of course also solves the same Poisson equation in three space dimensions. In this situation, the Poisson equation is often used as a model for diffusion of either a physical species (say, of ink in a tank of water, or a pollutant in the air) or of energy (specifically, of thermal energy in a solid body). In that context, the quantity

    -\[
+<picture><source srcset=\[
   \Phi_h = \int_{\partial\Omega} \nabla u_h(\mathbf x) \cdot \mathbf n(\mathbf x) \; dx
-\] +\]" src="form_4717.png"/>

    is the flux of this species or energy across the boundary. (In actual physical models, one would also have to multiply the right hand side by a diffusivity or conductivity constant, but let us ignore this here.) In much the same way as before, we compute such integrals by splitting it over integrals of faces of cells, and then applying quadrature:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \Phi_h
   &=
   \int_{\partial\Omega} \nabla u_h(\mathbf x) \cdot \mathbf n(\mathbf x) \; dx
@@ -582,9 +582,9 @@
   \sum_K
   \sum_{f \in \text{faces of @f$K@f$}, f\subset\partial\Omega}
   \sum_q \nabla u_h(\mathbf x_q^f) \cdot \mathbf n(\mathbf x_q^f) w_q^f,
-\end{align*} +\end{align*}" src="form_4718.png"/>

    -

    where now $\mathbf x_q^f$ are the quadrature points located on face $f$, and $w_q^f$ are the weights associated with these faces. The second of the sum symbols loops over all faces of cell $K$, but restricted to those that are actually at the boundary.

    +

    where now $\mathbf x_q^f$ are the quadrature points located on face $f$, and $w_q^f$ are the weights associated with these faces. The second of the sum symbols loops over all faces of cell $K$, but restricted to those that are actually at the boundary.

    This all is easily implemented by the following code that replaces the use of the FEValues class (which is used for integrating over cells – i.e., domain integrals) by the FEFaceValues class (which is used for integrating over faces – i.e., boundary integrals):

    QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
    FEFaceValues<dim> fe_face_values(fe,
    face_quadrature_formula,
    @@ -626,14 +626,14 @@
    30 CG iterations needed to obtain convergence.
    Mean value of u=1.58058
    Flux=-8.29435
    -

    This makes some sense: If you look, for example, at the 2d output above, the solution varies between values of 1 and 2, but with a larger part of the solution closer to one than two; so an average value of 1.33 for the mean value is reasonable. For the flux, recall that $\nabla u \cdot \mathbf n$ is the directional derivative in the normal direction – in other words, how the solution changes as we move from the interior of the domain towards the boundary. If you look at the 2d solution, you will realize that for most parts of the boundary, the solution decreases as we approach the boundary, so the normal derivative is negative – so if we integrate along the boundary, we should expect (and obtain!) a negative value.

    +

    This makes some sense: If you look, for example, at the 2d output above, the solution varies between values of 1 and 2, but with a larger part of the solution closer to one than two; so an average value of 1.33 for the mean value is reasonable. For the flux, recall that $\nabla u \cdot \mathbf n$ is the directional derivative in the normal direction – in other words, how the solution changes as we move from the interior of the domain towards the boundary. If you look at the 2d solution, you will realize that for most parts of the boundary, the solution decreases as we approach the boundary, so the normal derivative is negative – so if we integrate along the boundary, we should expect (and obtain!) a negative value.

    Possibilities for extensions

    There are many ways with which one can play with this program. The simpler ones include essentially all the possibilities already discussed in the Possibilities for extensions in the documentation of step 3, except that you will have to think about whether something now also applies to the 3d case discussed in the current program.

    It is also worthwhile considering the postprocessing options discussed above. The documentation states two numbers (the mean value and the normal flux) for both the 2d and 3d cases. Can we trust these numbers? We have convinced ourselves that at least the mean value is reasonable, and that the sign of the flux is probably correct. But are these numbers accurate?

    -

    A general rule is that we should never trust a number unless we have verified it in some way. From the theory of finite element methods, we know that as we make the mesh finer and finer, the numerical solution $u_h$ we compute here must converge to the exact solution $u$. As a consequence, we also expect that $\bar u_h \rightarrow \bar u$ and $\Phi_h \rightarrow \Phi$, but that does not mean that for any given mesh $\bar u_h$ or $\Phi_h$ are particularly accurate approximations.

    +

    A general rule is that we should never trust a number unless we have verified it in some way. From the theory of finite element methods, we know that as we make the mesh finer and finer, the numerical solution $u_h$ we compute here must converge to the exact solution $u$. As a consequence, we also expect that $\bar u_h \rightarrow \bar u$ and $\Phi_h \rightarrow \Phi$, but that does not mean that for any given mesh $\bar u_h$ or $\Phi_h$ are particularly accurate approximations.

    To test this kind of thing, we have already considered the convergence of a point value in step-3. We can do the same here by selecting how many times the mesh is globally refined in the make_grid() function of this program. For the mean value of the solution, we then get the following numbers:

    - + @@ -650,7 +650,7 @@

    I did not have the patience to run the last two values for the 3d case – one needs quite a fine mesh for this, with correspondingly long run times. But we can be reasonably assured that values around 1.33 (for the 2d case) and 1.58 (for the 3d case) are about right – and at least for engineering applications, three digits of accuracy are good enough.

    The situation looks very different for the flux. Here, we get results such as the following:

    #href_anchor"form_4714_dark.png" media="(prefers-color-scheme: dark)"/>$\bar u_h$ in 2d $\bar u_h$ in 3d
    #href_anchor"form_4712_dark.png" media="(prefers-color-scheme: dark)"/>$\bar u_h$ in 2d $\bar u_h$ in 3d
    4 1.33303 1.58058
    - + @@ -670,15 +670,15 @@
    # of refinements $\Phi_h$ in 2d $\Phi_h$ in 3d
    # of refinements $\Phi_h$ in 2d $\Phi_h$ in 3d
    4 -3.68956 -8.29435

    So this is not great. For the 2d case, we might infer that perhaps a value around -6.4 might be right if we just refine the mesh enough – though 11 refinements already leads to some 4,194,304 cells. In any case, the first number (the one shown in the beginning where we discussed postprocessing) was off by almost a factor of 2!

    For the 3d case, the last number shown was on a mesh with 2,097,152 cells; the next one would have had 8 times as many cells. In any case, the numbers mean that we can't even be sure that the first digit of that last number is correct! In other words, it was worth checking, or we would have just believed all of these numbers. In fact, that last column isn't even doing a particularly good job convincing us that the code might be correctly implemented.

    -

    If you keep reading through the other tutorial programs, you will find many ways to make these sorts of computations more accurate and to come to believe that the flux actually does converge to its correct value. For example, we can dramatically increase the accuracy of the computation by using adaptive mesh refinement (step-6) near the boundary, and in particular by using higher polynomial degree finite elements (also step-6, but also step-7). Using the latter, using cubic elements (polynomial degree 3), we can actually compute the flux pretty accurately even in 3d: $\Phi_h=-19.0148$ with 4 global refinement steps, and $\Phi_h=-19.1533$ with 5 refinement steps. These numbers are already pretty close together and give us a reasonable idea of the first two correct digits of the "true" answer.

    -
    Note
    We would be remiss to not also comment on the fact that there are good theoretical reasons why computing the flux accurately appears to be so much more difficult than the average value. This has to do with the fact that finite element theory provides us with the estimate $\|u-u_h\|_{L_2(\Omega)} \le C h^2 \|\nabla^2u\|_{L_2(\Omega)}$ when using the linear elements this program uses – that is, for every global mesh refinement, $h$ is reduced by a factor of two and the error goes down by a factor of 4. Now, the $L_2$ error is not equivalent to the error in the mean value, but the two are related: They are both integrals over the domain, using the value of the solution. We expect the mean value to converge no worse than the $L_2$ norm of the error. At the same time, theory also provides us with this estimate: $\|\nabla (u-u_h)\|_{L_2(\partial\Omega)} \le
-    C h^{1/2} \|\nabla^2u\|_{L_2(\Omega)}$. The move from values to gradients reduces the convergence rates by one order, and the move from domain to boundary by another half order. Here, then, each refinement step reduces the error not by a factor of 4 any more, by only by a factor of $\sqrt{2} \approx 1.4$. It takes a lot of global refinement steps to reduce the error by, say, a factor ten or hundred, and this is reflected in the very slow convergence evidenced by the table. On the other hand, for cubic elements (i.e., polynomial degree 3), we would get $\|u-u_h\|_{L_2(\Omega)} \le C h^4 \|\nabla^4u\|_{L_2(\Omega)}$ and after reduction by 1.5 orders, we would still have $\|\nabla (u-u_h)\|_{L_2(\partial\Omega)} \le
-    C h^{2+1/2} \|\nabla^4u\|_{L_2(\Omega)}$. This rate, ${\cal O}(h^{2.5})$ is still quite rapid, and it is perhaps not surprising that we get much better answers with these higher order elements. This also illustrates that when trying to approximate anything that relates to a gradient of the solution, using linear elements (polynomial degree one) is really not a good choice at all.
    +

    If you keep reading through the other tutorial programs, you will find many ways to make these sorts of computations more accurate and to come to believe that the flux actually does converge to its correct value. For example, we can dramatically increase the accuracy of the computation by using adaptive mesh refinement (step-6) near the boundary, and in particular by using higher polynomial degree finite elements (also step-6, but also step-7). Using the latter, using cubic elements (polynomial degree 3), we can actually compute the flux pretty accurately even in 3d: $\Phi_h=-19.0148$ with 4 global refinement steps, and $\Phi_h=-19.1533$ with 5 refinement steps. These numbers are already pretty close together and give us a reasonable idea of the first two correct digits of the "true" answer.

    +
    Note
    We would be remiss to not also comment on the fact that there are good theoretical reasons why computing the flux accurately appears to be so much more difficult than the average value. This has to do with the fact that finite element theory provides us with the estimate $\|u-u_h\|_{L_2(\Omega)} \le C h^2 \|\nabla^2u\|_{L_2(\Omega)}$ when using the linear elements this program uses – that is, for every global mesh refinement, $h$ is reduced by a factor of two and the error goes down by a factor of 4. Now, the $L_2$ error is not equivalent to the error in the mean value, but the two are related: They are both integrals over the domain, using the value of the solution. We expect the mean value to converge no worse than the $L_2$ norm of the error. At the same time, theory also provides us with this estimate: $\|\nabla (u-u_h)\|_{L_2(\partial\Omega)} \le
+    C h^{1/2} \|\nabla^2u\|_{L_2(\Omega)}$. The move from values to gradients reduces the convergence rates by one order, and the move from domain to boundary by another half order. Here, then, each refinement step reduces the error not by a factor of 4 any more, by only by a factor of $\sqrt{2} \approx 1.4$. It takes a lot of global refinement steps to reduce the error by, say, a factor ten or hundred, and this is reflected in the very slow convergence evidenced by the table. On the other hand, for cubic elements (i.e., polynomial degree 3), we would get $\|u-u_h\|_{L_2(\Omega)} \le C h^4 \|\nabla^4u\|_{L_2(\Omega)}$ and after reduction by 1.5 orders, we would still have $\|\nabla (u-u_h)\|_{L_2(\partial\Omega)} \le
+    C h^{2+1/2} \|\nabla^4u\|_{L_2(\Omega)}$. This rate, ${\cal O}(h^{2.5})$ is still quite rapid, and it is perhaps not surprising that we get much better answers with these higher order elements. This also illustrates that when trying to approximate anything that relates to a gradient of the solution, using linear elements (polynomial degree one) is really not a good choice at all.
    -In this very specific case, it turns out that we can actually compute the exact value of $\Phi$. This is because for the Poisson equation we compute the solution of here, $-\Delta u = f$, we can integrate over the domain, $-\int_\Omega \Delta u = \int_\Omega f$, and then use that $\Delta = \text{div}\;\text{grad}$; this allows us to use the divergence theorem followed by multiplying by minus one to find $\int_{\partial\Omega} \nabla u \cdot n = -\int_\Omega f$. The left hand side happens to be $\Phi$. For the specific right hand side $f(x_1,x_2)=4(x_1^4+x_2^4)$ we use in 2d, we then get $-\int_\Omega f = -\int_{-1}^{1} \int_{-1}^{1} 4(x_1^4+x_2^4) \; dx_2\; dx_1
-  = -16 \left[\int_{-1}^{1} x^4 \; dx\right] = -16\times\frac 25$, which has a numerical value of exactly -6.4 – right on with our guess above. In 3d, we can do the same and get that the exact value is $-\int_\Omega f =
+In this very specific case, it turns out that we can actually compute the exact value of <picture><source srcset=$\Phi$. This is because for the Poisson equation we compute the solution of here, $-\Delta u = f$, we can integrate over the domain, $-\int_\Omega \Delta u = \int_\Omega f$, and then use that $\Delta = \text{div}\;\text{grad}$; this allows us to use the divergence theorem followed by multiplying by minus one to find $\int_{\partial\Omega} \nabla u \cdot n = -\int_\Omega f$. The left hand side happens to be $\Phi$. For the specific right hand side $f(x_1,x_2)=4(x_1^4+x_2^4)$ we use in 2d, we then get $-\int_\Omega f = -\int_{-1}^{1} \int_{-1}^{1} 4(x_1^4+x_2^4) \; dx_2\; dx_1
+  = -16 \left[\int_{-1}^{1} x^4 \; dx\right] = -16\times\frac 25$, which has a numerical value of exactly -6.4 – right on with our guess above. In 3d, we can do the same and get that the exact value is $-\int_\Omega f =
    -\int_{-1}^{1} \int_{-1}^{1} \int_{-1}^{1} 4(x_1^4+x_2^4+x_3^4) \; dx_3 \; dx_2\; dx_1
-   = -48\times\frac 25=-19.2$. What we found with cubic elements is then quite close to this exact value. Of course, in practice we almost never have exact values to compare with: If we could compute something on a piece of paper, we wouldn't have to solve the PDE numerically. But these sorts of situations make for excellent test cases that help us verify that our numerical solver works correctly. In many other cases, the literature contains numbers where others have already computed an answer accurately using their own software, and these are also often useful to compare against in verifying the correctness of our codes.
    + = -48\times\frac 25=-19.2$" src="form_4739.png"/>. What we found with cubic elements is then quite close to this exact value. Of course, in practice we almost never have exact values to compare with: If we could compute something on a piece of paper, we wouldn't have to solve the PDE numerically. But these sorts of situations make for excellent test cases that help us verify that our numerical solver works correctly. In many other cases, the literature contains numbers where others have already computed an answer accurately using their own software, and these are also often useful to compare against in verifying the correctness of our codes.

    The plain program

    /* ---------------------------------------------------------------------
    *
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html 2024-03-17 21:57:46.219251444 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html 2024-03-17 21:57:46.223251469 +0000 @@ -136,13 +136,13 @@

    A general overview of how this parallelization happens is described in the Parallel computing with multiple processors using distributed memory documentation module. You should read it for a top-level overview before reading through the source code of this program. A concise discussion of many terms we will use in the program is also provided in the Distributed Computing paper. It is probably worthwhile reading it for background information on how things work internally in this program.

    The testcase

    This program essentially re-solves what we already do in step-6, i.e. it solves the Laplace equation

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   -\Delta u &= f \qquad &&\text{in}\ \Omega=[0,1]^2, \\
   u &= 0 \qquad &&\text{on}\ \partial\Omega.
-\end{align*} +\end{align*}" src="form_4740.png"/>

    The difference of course is now that we want to do so on a mesh that may have a billion cells, with a billion or so degrees of freedom. There is no doubt that doing so is completely silly for such a simple problem, but the point of a tutorial program is, after all, not to do something useful but to show how useful programs can be implemented using deal.II. Be that as it may, to make things at least a tiny bit interesting, we choose the right hand side as a discontinuous function,

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   f(x,y)
   =
   \left\{
@@ -151,7 +151,7 @@
     -1 & \text{otherwise},
   \end{array}
   \right.
-\end{align*} +\end{align*}" src="form_4741.png"/>

    so that the solution has a singularity along the sinusoidal line snaking its way through the domain. As a consequence, mesh refinement will be concentrated along this line. You can see this in the mesh picture shown below in the results section.

    Rather than continuing here and giving a long introduction, let us go straight to the program code. If you have read through step-6 and the Parallel computing with multiple processors using distributed memory documentation module, most of things that are going to happen should be familiar to you already. In fact, comparing the two programs you will notice that the additional effort necessary to make things work in parallel is almost insignificant: the two programs have about the same number of lines of code (though step-6 spends more space on dealing with coefficients and output). In either case, the comments below will only be on the things that set step-40 apart from step-6 and that aren't already covered in the Parallel computing with multiple processors using distributed memory documentation module.

    @@ -199,7 +199,7 @@
     

    The following, however, will be new or be used in new roles. Let's walk through them. The first of these will provide the tools of the Utilities::System namespace that we will use to query things like the number of processors associated with the current MPI universe, or the number within this universe the processor this job runs on is:

      #href_anchor"fragment">
      #include <deal.II/base/conditional_ostream.h>
    -

    After these preliminaries, here is where it becomes more interesting. As mentioned in the Parallel computing with multiple processors using distributed memory module, one of the fundamental truths of solving problems on large numbers of processors is that there is no way for any processor to store everything (e.g. information about all cells in the mesh, all degrees of freedom, or the values of all elements of the solution vector). Rather, every processor will own a few of each of these and, if necessary, may know about a few more, for example the ones that are located on cells adjacent to the ones this processor owns itself. We typically call the latter ghost cells, ghost nodes or ghost elements of a vector. The point of this discussion here is that we need to have a way to indicate which elements a particular processor owns or need to know of. This is the realm of the IndexSet class: if there are a total of $N$ cells, degrees of freedom, or vector elements, associated with (non-negative) integral indices $[0,N)$, then both the set of elements the current processor owns as well as the (possibly larger) set of indices it needs to know about are subsets of the set $[0,N)$. IndexSet is a class that stores subsets of this set in an efficient format:

    +

    After these preliminaries, here is where it becomes more interesting. As mentioned in the Parallel computing with multiple processors using distributed memory module, one of the fundamental truths of solving problems on large numbers of processors is that there is no way for any processor to store everything (e.g. information about all cells in the mesh, all degrees of freedom, or the values of all elements of the solution vector). Rather, every processor will own a few of each of these and, if necessary, may know about a few more, for example the ones that are located on cells adjacent to the ones this processor owns itself. We typically call the latter ghost cells, ghost nodes or ghost elements of a vector. The point of this discussion here is that we need to have a way to indicate which elements a particular processor owns or need to know of. This is the realm of the IndexSet class: if there are a total of $N$ cells, degrees of freedom, or vector elements, associated with (non-negative) integral indices $[0,N)$, then both the set of elements the current processor owns as well as the (possibly larger) set of indices it needs to know about are subsets of the set $[0,N)$. IndexSet is a class that stores subsets of this set in an efficient format:

      #href_anchor"el" href="namespaceSparsityTools.html#a6b5444028171035f8ffb3fb5c3f8da08">SparsityTools::distribute_sparsity_pattern. The role of this function will be explained below.

      #include <deal.II/lac/sparsity_tools.h>

    The final two, new header files provide the class parallel::distributed::Triangulation that provides meshes distributed across a potentially very large number of processors, while the second provides the namespace parallel::distributed::GridRefinement that offers functions that can adaptively refine such distributed meshes:

    @@ -688,7 +688,7 @@

    What these graphs show is that all parts of the program scale linearly with the number of degrees of freedom. This time, lines are wobbly at the left as the size of local problems is too small. For more discussions of these results we refer to the Distributed Computing paper.

    -

    So how large are the largest problems one can solve? At the time of writing this problem, the limiting factor is that the program uses the BoomerAMG algebraic multigrid method from the Hypre package as a preconditioner, which unfortunately uses signed 32-bit integers to index the elements of a distributed matrix. This limits the size of problems to $2^{31}-1=2,147,483,647$ degrees of freedom. From the graphs above it is obvious that the scalability would extend beyond this number, and one could expect that given more than the 4,096 machines shown above would also further reduce the compute time. That said, one can certainly expect that this limit will eventually be lifted by the hypre developers.

    +

    So how large are the largest problems one can solve? At the time of writing this problem, the limiting factor is that the program uses the BoomerAMG algebraic multigrid method from the Hypre package as a preconditioner, which unfortunately uses signed 32-bit integers to index the elements of a distributed matrix. This limits the size of problems to $2^{31}-1=2,147,483,647$ degrees of freedom. From the graphs above it is obvious that the scalability would extend beyond this number, and one could expect that given more than the 4,096 machines shown above would also further reduce the compute time. That said, one can certainly expect that this limit will eventually be lifted by the hypre developers.

    On the other hand, this does not mean that deal.II cannot solve bigger problems. Indeed, step-37 shows how one can solve problems that are not just a little, but very substantially larger than anything we have shown here.

    Possibilities for extensions

    In a sense, this program is the ultimate solver for the Laplace equation: it can essentially solve the equation to whatever accuracy you want, if only you have enough processors available. Since the Laplace equation by itself is not terribly interesting at this level of accuracy, the more interesting possibilities for extension therefore concern not so much this program but what comes beyond it. For example, several of the other programs in this tutorial have significant run times, especially in 3d. It would therefore be interesting to use the techniques explained here to extend other programs to support parallel distributed computations. We have done this for step-31 in the step-32 tutorial program, but the same would apply to, for example, step-23 and step-25 for hyperbolic time dependent problems, step-33 for gas dynamics, or step-35 for the Navier-Stokes equations.

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html 2024-03-17 21:57:46.279251814 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html 2024-03-17 21:57:46.283251839 +0000 @@ -155,10 +155,10 @@ u(\mathbf x) &\geq g(\mathbf x) & &\quad\text{in } \Omega \end{align*}" src="form_4744.png"/>

    -

    with $u\in H^2(\Omega)$. $u$ is a scalar valued function that denotes the vertical displacement of the membrane. The first equation is called equilibrium condition with a force of areal density $f$. Here, we will consider this force to be gravity. The second one is known as Hooke's Law that says that the stresses $\sigma$ are proportional to the gradient of the displacements $u$ (the proportionality constant, often denoted by $E$, has been set to one here, without loss of generality; if it is constant, it can be put into the right hand side function). At the boundary we have zero Dirichlet conditions. Obviously, the first two equations can be combined to yield $-\Delta u \ge f$.

    -

    Intuitively, gravity acts downward and so $f(\mathbf x)$ is a negative function (we choose $f=-10$ in this program). The first condition then means that the total force acting on the membrane is gravity plus something positive: namely the upward force that the obstacle exerts on the membrane at those places where the two of them are in contact. How big is this additional force? We don't know yet (and neither do we know "where" it actually acts) but it must be so that the membrane doesn't penetrate the obstacle.

    -

    The fourth equality above together with the last inequality forms the obstacle condition which has to hold at every point of the whole domain. The latter of these two means that the membrane must be above the obstacle $g(\mathbf x)$ everywhere. The second to last equation, often called the "complementarity -condition" says that where the membrane is not in contact with the obstacle (i.e., those $\mathbf x$ where $u(\mathbf x) - g(\mathbf x) \neq 0$), then $-\Delta u=f$ at these locations; in other words, no additional forces act there, as expected. On the other hand, where $u=g$ we can have $-\Delta u-f
+<p> with <picture><source srcset=$u\in H^2(\Omega)$. $u$ is a scalar valued function that denotes the vertical displacement of the membrane. The first equation is called equilibrium condition with a force of areal density $f$. Here, we will consider this force to be gravity. The second one is known as Hooke's Law that says that the stresses $\sigma$ are proportional to the gradient of the displacements $u$ (the proportionality constant, often denoted by $E$, has been set to one here, without loss of generality; if it is constant, it can be put into the right hand side function). At the boundary we have zero Dirichlet conditions. Obviously, the first two equations can be combined to yield $-\Delta u \ge f$.

    +

    Intuitively, gravity acts downward and so $f(\mathbf x)$ is a negative function (we choose $f=-10$ in this program). The first condition then means that the total force acting on the membrane is gravity plus something positive: namely the upward force that the obstacle exerts on the membrane at those places where the two of them are in contact. How big is this additional force? We don't know yet (and neither do we know "where" it actually acts) but it must be so that the membrane doesn't penetrate the obstacle.

    +

    The fourth equality above together with the last inequality forms the obstacle condition which has to hold at every point of the whole domain. The latter of these two means that the membrane must be above the obstacle $g(\mathbf x)$ everywhere. The second to last equation, often called the "complementarity +condition" says that where the membrane is not in contact with the obstacle (i.e., those $\mathbf x$ where $u(\mathbf x) - g(\mathbf x) \neq 0$), then $-\Delta u=f$ at these locations; in other words, no additional forces act there, as expected. On the other hand, where $u=g$ we can have $-\Delta u-f
 \neq 0$, i.e., there can be additional forces (though there don't have to be: it is possible for the membrane to just touch, not press against, the obstacle).

    Derivation of the variational inequality

    An obvious way to obtain the variational formulation of the obstacle problem is to consider the total potential energy:

    @@ -177,22 +177,22 @@ \end{equation*}" src="form_4754.png"/>

    This set takes care of the third and fifth conditions above (the boundary values and the complementarity condition).

    -

    Consider now the minimizer $u\in G$ of $E$ and any other function $v\in
+<p>Consider now the minimizer <picture><source srcset=$u\in G$ of $E$ and any other function $v\in
 G$. Then the function

    \begin{equation*}
  F(\varepsilon) \dealcoloneq E(u+\varepsilon(v-u)),\quad\varepsilon\in\left[0,1\right],
 \end{equation*}

    -

    takes its minimum at $\varepsilon = 0$ (because $u$ is a minimizer of the energy functional $E(\cdot)$), so that $F'(0)\geq 0$ for any choice of $v$. Note that $u+\varepsilon(v-u) = (1-\varepsilon)u+\varepsilon v\in G$ because of the convexity of $G$. If we compute $F'(\varepsilon)\vert_{\varepsilon=0}$ it yields the variational formulation we are searching for:

    +

    takes its minimum at $\varepsilon = 0$ (because $u$ is a minimizer of the energy functional $E(\cdot)$), so that $F'(0)\geq 0$ for any choice of $v$. Note that $u+\varepsilon(v-u) = (1-\varepsilon)u+\varepsilon v\in G$ because of the convexity of $G$. If we compute $F'(\varepsilon)\vert_{\varepsilon=0}$ it yields the variational formulation we are searching for:

    Find a function $u\in G$ with

    \begin{equation*}
  \left(\nabla u, \nabla(v-u)\right) \geq \left(f,v-u\right) \quad \forall v\in G.
 \end{equation*}

    -

    This is the typical form of variational inequalities, where not just $v$ appears in the bilinear form but in fact $v-u$. The reason is this: if $u$ is not constrained, then we can find test functions $v$ in $G$ so that $v-u$ can have any sign. By choosing test functions $v_1,v_2$ so that $v_1-u = -(v_2-u)$ it follows that the inequality can only hold for both $v_1$ and $v_2$ if the two sides are in fact equal, i.e., we obtain a variational equality.

    -

    On the other hand, if $u=g$ then $G$ only allows test functions $v$ so that in fact $v-u\ge 0$. This means that we can't test the equation with both $v-u$ and $-(v-u)$ as above, and so we can no longer conclude that the two sides are in fact equal. Thus, this mimics the way we have discussed the complementarity condition above.

    +

    This is the typical form of variational inequalities, where not just $v$ appears in the bilinear form but in fact $v-u$. The reason is this: if $u$ is not constrained, then we can find test functions $v$ in $G$ so that $v-u$ can have any sign. By choosing test functions $v_1,v_2$ so that $v_1-u = -(v_2-u)$ it follows that the inequality can only hold for both $v_1$ and $v_2$ if the two sides are in fact equal, i.e., we obtain a variational equality.

    +

    On the other hand, if $u=g$ then $G$ only allows test functions $v$ so that in fact $v-u\ge 0$. This means that we can't test the equation with both $v-u$ and $-(v-u)$ as above, and so we can no longer conclude that the two sides are in fact equal. Thus, this mimics the way we have discussed the complementarity condition above.

    Formulation as a saddle point problem

    -

    The variational inequality above is awkward to work with. We would therefore like to reformulate it as an equivalent saddle point problem. We introduce a Lagrange multiplier $\lambda$ and the convex cone $K\subset V'$, $V'$ dual space of $V$, $K \dealcoloneq \{\mu\in V': \langle\mu,v\rangle\geq 0,\quad \forall
+<p>The variational inequality above is awkward to work with. We would therefore like to reformulate it as an equivalent saddle point problem. We introduce a Lagrange multiplier <picture><source srcset=$\lambda$ and the convex cone $K\subset V'$, $V'$ dual space of $V$, $K \dealcoloneq \{\mu\in V': \langle\mu,v\rangle\geq 0,\quad \forall
 v\in V, v \le 0 \}$ of Lagrange multipliers, where $\langle\cdot,\cdot\rangle$ denotes the duality pairing between $V'$ and $V$. Intuitively, $K$ is the cone of all "non-positive functions", except that $K\subset (H_0^1)'$ and so contains other objects besides regular functions as well. This yields:

    Find $u\in V$ and $\lambda\in K$ such that

    @@ -207,25 +207,25 @@ b(u,\mu) &\dealcoloneq \langle u,\mu\rangle,\quad &&u\in V,\quad\mu\in V'. \end{align*}" src="form_4776.png"/>

    -

    In other words, we can consider $\lambda$ as the negative of the additional, positive force that the obstacle exerts on the membrane. The inequality in the second line of the statement above only appears to have the wrong sign because we have $\mu-\lambda<0$ at points where $\lambda=0$, given the definition of $K$.

    +

    In other words, we can consider $\lambda$ as the negative of the additional, positive force that the obstacle exerts on the membrane. The inequality in the second line of the statement above only appears to have the wrong sign because we have $\mu-\lambda<0$ at points where $\lambda=0$, given the definition of $K$.

    The existence and uniqueness of $(u,\lambda)\in V\times K$ of this saddle point problem has been stated in Glowinski, Lions and Trémolières: Numerical Analysis of Variational Inequalities, North-Holland, 1981.

    Active Set methods to solve the saddle point problem

    There are different methods to solve the variational inequality. As one possibility you can understand the saddle point problem as a convex quadratic program (QP) with inequality constraints.

    -

    To get there, let us assume that we discretize both $u$ and $\lambda$ with the same finite element space, for example the usual $Q_k$ spaces. We would then get the equations

    +

    To get there, let us assume that we discretize both $u$ and $\lambda$ with the same finite element space, for example the usual $Q_k$ spaces. We would then get the equations

    \begin{eqnarray*}
  &A U + B\Lambda = F,&\\
  &[BU-G]_i \geq 0, \quad \Lambda_i \leq 0,\quad \Lambda_i[BU-G]_i = 0
 \qquad \forall i.&
 \end{eqnarray*}

    -

    where $B$ is the mass matrix on the chosen finite element space and the indices $i$ above are for all degrees of freedom in the set $\cal S$ of degrees of freedom located in the interior of the domain (we have Dirichlet conditions on the perimeter). However, we can make our life simpler if we use a particular quadrature rule when assembling all terms that yield this mass matrix, namely a quadrature formula where quadrature points are only located at the interpolation points at which shape functions are defined; since all but one shape function are zero at these locations, we get a diagonal mass matrix with

    +

    where $B$ is the mass matrix on the chosen finite element space and the indices $i$ above are for all degrees of freedom in the set $\cal S$ of degrees of freedom located in the interior of the domain (we have Dirichlet conditions on the perimeter). However, we can make our life simpler if we use a particular quadrature rule when assembling all terms that yield this mass matrix, namely a quadrature formula where quadrature points are only located at the interpolation points at which shape functions are defined; since all but one shape function are zero at these locations, we get a diagonal mass matrix with

    \begin{align*}
   B_{ii} = \int_\Omega \varphi_i(\mathbf x)^2\ \textrm{d}x,
   \qquad
   B_{ij}=0 \ \text{for } i\neq j.
 \end{align*}

    -

    To define $G$ we use the same technique as for $B$. In other words, we define

    +

    To define $G$ we use the same technique as for $B$. In other words, we define

    \begin{align*}
   G_{i} = \int_\Omega g_h(x) \varphi_i(\mathbf x)\ \textrm{d}x,
 \end{align*} @@ -237,7 +237,7 @@ \qquad \forall i\in{\cal S}.& \end{eqnarray*}" src="form_4787.png"/>

    -

    Now we define for each degree of freedom $i$ the function

    +

    Now we define for each degree of freedom $i$ the function

    \begin{equation*}
  C([BU]_i,\Lambda_i) \dealcoloneq -\Lambda_i + \min\lbrace 0, \Lambda_i + c([BU]_i - G_i) \rbrace,
 \end{equation*} @@ -248,7 +248,7 @@ C([BU]_i,\Lambda_i) = 0, \qquad \forall i\in{\cal S}. \end{equation*}" src="form_4792.png"/>

    -

    The primal-dual active set strategy we will use here is an iterative scheme which is based on this condition to predict the next active and inactive sets $\mathcal{A}_k$ and $\mathcal{F}_k$ (that is, those complementary sets of indices $i$ for which $U_i$ is either equal to or not equal to the value of the obstacle $B^{-1}G$). For a more in depth treatment of this approach, see Hintermueller, Ito, Kunisch: The primal-dual active set strategy as a semismooth newton method, SIAM J. OPTIM., 2003, Vol. 13, No. 3, pp. 865-888.

    +

    The primal-dual active set strategy we will use here is an iterative scheme which is based on this condition to predict the next active and inactive sets $\mathcal{A}_k$ and $\mathcal{F}_k$ (that is, those complementary sets of indices $i$ for which $U_i$ is either equal to or not equal to the value of the obstacle $B^{-1}G$). For a more in depth treatment of this approach, see Hintermueller, Ito, Kunisch: The primal-dual active set strategy as a semismooth newton method, SIAM J. OPTIM., 2003, Vol. 13, No. 3, pp. 865-888.

    The primal-dual active set algorithm

    The algorithm for the primal-dual active set method works as follows (NOTE: $B = B^T$):

      @@ -483,7 +483,7 @@
      const ::parallel::distributed::Triangulation< dim, spacedim > * triangulation

    Right hand side, boundary values, and the obstacle

    -

    In the following, we define classes that describe the right hand side function, the Dirichlet boundary values, and the height of the obstacle as a function of $\mathbf x$. In all three cases, we derive these classes from Function<dim>, although in the case of RightHandSide and Obstacle this is more out of convention than necessity since we never pass such objects to the library. In any case, the definition of the right hand side and boundary values classes is obvious given our choice of $f=-10$, $u|_{\partial\Omega}=0$:

    +

    In the following, we define classes that describe the right hand side function, the Dirichlet boundary values, and the height of the obstacle as a function of $\mathbf x$. In all three cases, we derive these classes from Function<dim>, although in the case of RightHandSide and Obstacle this is more out of convention than necessity since we never pass such objects to the library. In any case, the definition of the right hand side and boundary values classes is obvious given our choice of $f=-10$, $u|_{\partial\Omega}=0$:

      template <int dim>
      class RightHandSide : public Function<dim>
      {
    @@ -557,7 +557,7 @@
     
     

    ObstacleProblem::make_grid

    -

    We solve our obstacle problem on the square $[-1,1]\times [-1,1]$ in 2d. This function therefore just sets up one of the simplest possible meshes.

    +

    We solve our obstacle problem on the square $[-1,1]\times [-1,1]$ in 2d. This function therefore just sets up one of the simplest possible meshes.

      template <int dim>
      void ObstacleProblem<dim>::make_grid()
      {
    @@ -683,7 +683,7 @@
    @ update_gradients
    Shape function gradients.
    @ update_quadrature_points
    Transformed quadrature points.

    ObstacleProblem::assemble_mass_matrix_diagonal

    -

    The next function is used in the computation of the diagonal mass matrix $B$ used to scale variables in the active set method. As discussed in the introduction, we get the mass matrix to be diagonal by choosing the trapezoidal rule for quadrature. Doing so we don't really need the triple loop over quadrature points, indices $i$ and indices $j$ any more and can, instead, just use a double loop. The rest of the function is obvious given what we have discussed in many of the previous tutorial programs.

    +

    The next function is used in the computation of the diagonal mass matrix $B$ used to scale variables in the active set method. As discussed in the introduction, we get the mass matrix to be diagonal by choosing the trapezoidal rule for quadrature. Doing so we don't really need the triple loop over quadrature points, indices $i$ and indices $j$ any more and can, instead, just use a double loop. The rest of the function is obvious given what we have discussed in many of the previous tutorial programs.

    Note that at the time this function is called, the constraints object only contains boundary value constraints; we therefore do not have to pay attention in the last copy-local-to-global step to preserve the values of matrix entries that may later on be constrained by the active set.

    Note also that the trick with the trapezoidal rule only works if we have in fact $Q_1$ elements. For higher order elements, one would need to use a quadrature formula that has quadrature points at all the support points of the finite element. Constructing such a quadrature formula isn't really difficult, but not the point here, and so we simply assert at the top of the function that our implicit assumption about the finite element is in fact satisfied.

      template <int dim>
    @@ -803,7 +803,7 @@
      }
     

    ObstacleProblem::solve

    -

    There is nothing to say really about the solve function. In the context of a Newton method, we are not typically interested in very high accuracy (why ask for a highly accurate solution of a linear problem that we know only gives us an approximation of the solution of the nonlinear problem), and so we use the ReductionControl class that stops iterations when either an absolute tolerance is reached (for which we choose $10^{-12}$) or when the residual is reduced by a certain factor (here, $10^{-3}$).

    +

    There is nothing to say really about the solve function. In the context of a Newton method, we are not typically interested in very high accuracy (why ask for a highly accurate solution of a linear problem that we know only gives us an approximation of the solution of the nonlinear problem), and so we use the ReductionControl class that stops iterations when either an absolute tolerance is reached (for which we choose $10^{-12}$) or when the residual is reduced by a certain factor (here, $10^{-3}$).

      template <int dim>
      void ObstacleProblem<dim>::solve()
      {
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html 2024-03-17 21:57:46.407252606 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html 2024-03-17 21:57:46.411252629 +0000 @@ -158,7 +158,7 @@

    Since you can very easily reach a few million degrees of freedom in three dimensions, even with adaptive mesh refinement, we decided to use Trilinos and p4est to run our code in parallel, building on the framework of step-40 for the parallelization. Additional pointers for parallelization can be found in step-32.

    Classical formulation

    The classical formulation of the problem possesses the following form:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
  \varepsilon(\mathbf u) &= A\sigma + \varepsilon^p & &\quad\text{in } \Omega,\\
   -\textrm{div}\ \sigma &= \mathbf f & &\quad\text{in } \Omega,\\
   \varepsilon^p:(\tau - \sigma) &\geq 0\quad\forall\tau\text{ with
@@ -170,28 +170,28 @@
   (\mathbf n \cdot (\sigma
   \mathbf n))(\mathbf n \cdot \mathbf u - g) &= 0,\quad \mathbf n
   \cdot \mathbf u - g \leq 0 & &\quad\text{on } \Gamma_C.
-\end{align*} +\end{align*}" src="form_4838.png"/>

    -

    Here, the first of these equations defines the relationship between strain $\varepsilon(\mathbf u)=\frac{1}{2}\left(\nabla \mathbf u
-  + \nabla \mathbf u^T\right)$ and stress $\sigma$ via the fourth-order compliance tensor $A$; $\varepsilon^p$ provides the plastic component of the strain to ensure that the stress does not exceed the yield stress. We will only consider isotropic materials for which $A$ can be expressed in terms of the Lamé moduli $\lambda$ and $\mu$ or alternatively in terms of the bulk modulus $\kappa$ and $\mu$. The second equation is the force balance; we will here not consider any body forces and henceforth assume that $\mathbf f=0$. The complementarity condition in the third line implies that $\varepsilon^p=0$ if $\mathcal{F}(\sigma)< 0$ but that $\varepsilon^p$ may be a nonzero tensor if and only if $\mathcal{F}(\sigma) = 0$, and in particular that in this case $\varepsilon^p$ must point in the direction $\partial
-\mathcal{F}(\sigma)/\partial \sigma$. The inequality $\mathcal{F}(\sigma)\le 0$ is a statement of the fact that plastic materials can only support a finite amount of stress; in other words, they react with plastic deformations $\varepsilon^p$ if external forces would result in a stress $\sigma$ for which $\mathcal{F}(\sigma)> 0$ would result. A typical form for this yield function is $\mathcal{F}(\sigma)=|\sigma^D|-\sigma_{\text{yield}}$ where $\tau^D
-= \tau - \dfrac{1}{3}tr(\tau)I$ is the deviatoric part of a tensor and $|\cdot|$ denotes the Frobenius norm.

    -

    Further equations describe a fixed, zero displacement on $\Gamma_D$ and that on the surface $\Gamma_C=\partial\Omega\backslash\Gamma_D$ where contact may appear, the normal force $\sigma_n=\mathbf n \cdot (\sigma(\mathbf u)
-  \mathbf n)$ exerted by the obstacle is inward (no "pull" by the obstacle on our body) and with zero tangential component $\mathbf \sigma_t= \sigma \mathbf n - \mathbf \sigma_n \mathbf n
-= \sigma \mathbf n - [\mathbf n \cdot(\sigma \mathbf n)]\mathbf n$. The last condition is again a complementarity condition that implies that on $\Gamma_C$, the normal force can only be nonzero if the body is in contact with the obstacle; the second part describes the impenetrability of the obstacle and the body. The last two equations are commonly referred to as the Signorini contact conditions.

    -

    Most materials - especially metals - have the property that they show some hardening as a result of deformation. In other words, $\sigma_{\text{yield}}$ increases with deformation. In practice, it is not the elastic deformation that results in hardening, but the plastic component. There are different constitutive laws to describe those material behaviors. The simplest one is called linear isotropic hardening described by the flow function $\mathcal{F}(\sigma,\varepsilon^p) = \vert\sigma^D\vert - (\sigma_0 +
-\gamma^{\text{iso}}|\varepsilon^p|)$.

    +

    Here, the first of these equations defines the relationship between strain $\varepsilon(\mathbf u)=\frac{1}{2}\left(\nabla \mathbf u
+  + \nabla \mathbf u^T\right)$ and stress $\sigma$ via the fourth-order compliance tensor $A$; $\varepsilon^p$ provides the plastic component of the strain to ensure that the stress does not exceed the yield stress. We will only consider isotropic materials for which $A$ can be expressed in terms of the Lamé moduli $\lambda$ and $\mu$ or alternatively in terms of the bulk modulus $\kappa$ and $\mu$. The second equation is the force balance; we will here not consider any body forces and henceforth assume that $\mathbf f=0$. The complementarity condition in the third line implies that $\varepsilon^p=0$ if $\mathcal{F}(\sigma)< 0$ but that $\varepsilon^p$ may be a nonzero tensor if and only if $\mathcal{F}(\sigma) = 0$, and in particular that in this case $\varepsilon^p$ must point in the direction $\partial
+\mathcal{F}(\sigma)/\partial \sigma$. The inequality $\mathcal{F}(\sigma)\le 0$ is a statement of the fact that plastic materials can only support a finite amount of stress; in other words, they react with plastic deformations $\varepsilon^p$ if external forces would result in a stress $\sigma$ for which $\mathcal{F}(\sigma)> 0$ would result. A typical form for this yield function is $\mathcal{F}(\sigma)=|\sigma^D|-\sigma_{\text{yield}}$ where $\tau^D
+= \tau - \dfrac{1}{3}tr(\tau)I$ is the deviatoric part of a tensor and $|\cdot|$ denotes the Frobenius norm.

    +

    Further equations describe a fixed, zero displacement on $\Gamma_D$ and that on the surface $\Gamma_C=\partial\Omega\backslash\Gamma_D$ where contact may appear, the normal force $\sigma_n=\mathbf n \cdot (\sigma(\mathbf u)
+  \mathbf n)$ exerted by the obstacle is inward (no "pull" by the obstacle on our body) and with zero tangential component $\mathbf \sigma_t= \sigma \mathbf n - \mathbf \sigma_n \mathbf n
+= \sigma \mathbf n - [\mathbf n \cdot(\sigma \mathbf n)]\mathbf n$. The last condition is again a complementarity condition that implies that on $\Gamma_C$, the normal force can only be nonzero if the body is in contact with the obstacle; the second part describes the impenetrability of the obstacle and the body. The last two equations are commonly referred to as the Signorini contact conditions.

    +

    Most materials - especially metals - have the property that they show some hardening as a result of deformation. In other words, $\sigma_{\text{yield}}$ increases with deformation. In practice, it is not the elastic deformation that results in hardening, but the plastic component. There are different constitutive laws to describe those material behaviors. The simplest one is called linear isotropic hardening described by the flow function $\mathcal{F}(\sigma,\varepsilon^p) = \vert\sigma^D\vert - (\sigma_0 +
+\gamma^{\text{iso}}|\varepsilon^p|)$.

    Reformulation as a variational inequality

    It is generally rather awkward to deal with inequalities. Here, we have to deal with two: plasticity and the contact problem. As described in more detail in the paper mentioned at the top of this page, one can at least reformulate the plasticity in a way that makes it look like a nonlinearity that we can then treat with Newton's method. This is slightly tricky mathematically since the nonlinearity is not just some smooth function but instead has kinks where the stress reaches the yield stress; however, it can be shown for such semismooth functions that Newton's method still converges.

    Without going into details, we will also get rid of the stress as an independent variable and instead work exclusively with the displacements $\mathbf u$. Ultimately, the goal of this reformulation is that we will want to end up with a symmetric, positive definite problem - such as a linearized elasticity problem with spatially variable coefficients resulting from the plastic behavior - that needs to be solved in each Newton step. We want this because there are efficient and scalable methods for the solution of such linear systems, such as CG preconditioned with an algebraic multigrid. This is opposed to the saddle point problem akin to the mixed Laplace (see step-20) we would get were we to continue with the mixed formulation containing both displacements and stresses, and for which step-20 already gives a hint at how difficult it is to construct good solvers and preconditioners.

    -

    With this said, let us simply state the problem we obtain after reformulation (again, details can be found in the paper): Find a displacement $\mathbf u \in
-V^+$ so that

    -\begin{align*}
+<p>With this said, let us simply state the problem we obtain after reformulation (again, details can be found in the paper): Find a displacement <picture><source srcset=$\mathbf u \in
+V^+$ so that

    +\begin{align*}
 \left(P_{\Pi}(C\varepsilon(\mathbf u)),\varepsilon(\varphi) - \varepsilon(\mathbf u)\right) \geq 0,\quad \forall \varphi\in V^+.
-\end{align*} +\end{align*}" src="form_4858.png"/>

    -

    where the projector $P_\Pi$ is defined as

    -\begin{align*}
+<p> where the projector <picture><source srcset=$P_\Pi$ is defined as

    +\begin{align*}
  P_{\Pi}(\tau) \dealcoloneq \begin{cases}
     \tau, & \text{if }\vert\tau^D\vert \leq \sigma_0,\\
     \left[
@@ -201,10 +201,10 @@
     + \dfrac{1}{3}\text{trace}(\tau) I, & \text{if }\vert\tau^D\vert >
     \sigma_0,
   \end{cases}
-\end{align*} +\end{align*}" src="form_4860.png"/>

    -

    and the space $V^+$ is the space of all displacements that satisfy the contact condition:

    -\begin{align*}
+<p> and the space <picture><source srcset=$V^+$ is the space of all displacements that satisfy the contact condition:

    +\begin{align*}
   V
   &=
   \left\{ \mathbf u\in \left[H^1(\Omega)\right]^{d}:
@@ -213,18 +213,18 @@
   V^+
   &=
   \left\{ \mathbf u\in V: \mathbf n \cdot \mathbf u\leq g \text{ on } \Gamma_C \right\}.
-\end{align*} +\end{align*}" src="form_4862.png"/>

    -

    In the actual code, we will use the abbreviation $\gamma=\dfrac{\gamma^{\text{iso}}}{2\mu + \gamma^{\text{iso}}}$.

    +

    In the actual code, we will use the abbreviation $\gamma=\dfrac{\gamma^{\text{iso}}}{2\mu + \gamma^{\text{iso}}}$.

    Given this formulation, we will apply two techniques:

    A strict approach would keep the active set fixed while we iterate the Newton method to convergence (or maybe the other way around: find the final active set before moving on to the next Newton iteration). In practice, it turns out that it is sufficient to do only a single Newton step per active set iteration, and so we will iterate over them concurrently. We will also, every once in a while, refine the mesh.

    A Newton method for the plastic nonlinearity

    -

    As mentioned, we will treat the nonlinearity of the operator $P_\Pi$ by applying a Newton method, despite the fact that the operator is not differentiable in the strict sense. However, it satisfies the conditions of slant differentiability and this turns out to be enough for Newton's method to work. The resulting method then goes by the name semi-smooth Newton method, which sounds impressive but is, in reality, just a Newton method applied to a semi-smooth function with an appropriately chosen "derivative".

    -

    In the current case, we will run our iteration by solving in each iteration $i$ the following equation (still an inequality, but linearized):

    -\begin{align*}
+<p>As mentioned, we will treat the nonlinearity of the operator <picture><source srcset=$P_\Pi$ by applying a Newton method, despite the fact that the operator is not differentiable in the strict sense. However, it satisfies the conditions of slant differentiability and this turns out to be enough for Newton's method to work. The resulting method then goes by the name semi-smooth Newton method, which sounds impressive but is, in reality, just a Newton method applied to a semi-smooth function with an appropriately chosen "derivative".

    +

    In the current case, we will run our iteration by solving in each iteration $i$ the following equation (still an inequality, but linearized):

    +\begin{align*}
   \label{eq:linearization}
   \left(I_{\Pi}\varepsilon(\tilde {\mathbf u}^{i}),
     \varepsilon(\varphi) - \varepsilon(\tilde {\mathbf u}^{i})\right) \geq
@@ -233,10 +233,10 @@
   \left(P_{\Pi}(C\varepsilon({\mathbf u}^{i-1})),
     \varepsilon(\varphi) - \varepsilon(\tilde {\mathbf u}^{i})\right)\right),
   \quad \forall \varphi\in V^+,
-\end{align*} +\end{align*}" src="form_4864.png"/>

    -

    where the rank-4 tensor $I_\Pi=I_\Pi(\varepsilon^D(\mathbf u^{i-1}))$ given by

    -\begin{align}
+<p> where the rank-4 tensor <picture><source srcset=$I_\Pi=I_\Pi(\varepsilon^D(\mathbf u^{i-1}))$ given by

    +\begin{align}
   I_\Pi = \begin{cases}
     C_{\mu} + C_{\kappa}, & \hspace{-8em} \text{if } \vert C\varepsilon^D(\mathbf u^{i-1}) \vert \leq \sigma_0,
     \\
@@ -244,100 +244,100 @@
       2\mu\dfrac{C\varepsilon^D(\mathbf u^{i-1})\otimes C\varepsilon^D(\mathbf
         u^{i-1})}{\vert C\varepsilon^D(\mathbf u^{i-1})\vert^2}\right) + C_{\kappa}, & \text{ else.}
 \end{cases}
-\end{align} +\end{align}" src="form_4866.png"/>

    -

    This tensor is the (formal) linearization of $P_\Pi(C\cdot)$ around $\varepsilon^D(\mathbf u^{i-1})$. For the linear isotropic material we consider here, the bulk and shear components of the projector are given by

    -\begin{gather*}
+<p> This tensor is the (formal) linearization of <picture><source srcset=$P_\Pi(C\cdot)$ around $\varepsilon^D(\mathbf u^{i-1})$. For the linear isotropic material we consider here, the bulk and shear components of the projector are given by

    +\begin{gather*}
   C_{\kappa} = \kappa I\otimes I,
   \qquad\qquad\qquad\qquad
   C_{\mu} = 2\mu\left(\mathbb{I}  - \dfrac{1}{3} I\otimes
     I\right),
-\end{gather*} +\end{gather*}" src="form_4869.png"/>

    -

    where $I$ and $\mathbb{I}$ are the identity tensors of rank 2 and 4, respectively.

    -

    Note that this problem corresponds to a linear elastic contact problem where $I_\Pi$ plays the role of the elasticity tensor $C=A^{-1}$. Indeed, if the material is not plastic at a point, then $I_\Pi=C$. However, at places where the material is plastic, $I_\Pi$ is a spatially varying function. In any case, the system we have to solve for the Newton iterate $\tilde {\mathbf u}^{i}$ gets us closer to the goal of rewriting our problem in a way that allows us to use well-known solvers and preconditioners for elliptic systems.

    -

    As a final note about the Newton method let us mention that as is common with Newton methods we need to globalize it by controlling the step length. In other words, while the system above solves for $\tilde {\mathbf u}^{i}$, the final iterate will rather be

    -\begin{align*}
+<p> where <picture><source srcset=$I$ and $\mathbb{I}$ are the identity tensors of rank 2 and 4, respectively.

    +

    Note that this problem corresponds to a linear elastic contact problem where $I_\Pi$ plays the role of the elasticity tensor $C=A^{-1}$. Indeed, if the material is not plastic at a point, then $I_\Pi=C$. However, at places where the material is plastic, $I_\Pi$ is a spatially varying function. In any case, the system we have to solve for the Newton iterate $\tilde {\mathbf u}^{i}$ gets us closer to the goal of rewriting our problem in a way that allows us to use well-known solvers and preconditioners for elliptic systems.

    +

    As a final note about the Newton method let us mention that as is common with Newton methods we need to globalize it by controlling the step length. In other words, while the system above solves for $\tilde {\mathbf u}^{i}$, the final iterate will rather be

    +\begin{align*}
   {\mathbf u}^{i} = {\mathbf u}^{i-1} + \alpha_i (\tilde {\mathbf u}^{i} - {\mathbf u}^{i-1})
-\end{align*} +\end{align*}" src="form_4875.png"/>

    -

    where the difference in parentheses on the right takes the role of the traditional Newton direction, $\delta {\mathbf u}^{i}$. We will determine $\alpha^i$ using a standard line search.

    +

    where the difference in parentheses on the right takes the role of the traditional Newton direction, $\delta {\mathbf u}^{i}$. We will determine $\alpha^i$ using a standard line search.

    Active Set methods to solve the saddle point problem

    -

    This linearized problem to be solved in each Newton step is essentially like in step-41. The only difference consists in the fact that the contact area is at the boundary instead of in the domain. But this has no further consequence so that we refer to the documentation of step-41 with the only hint that $\mathcal{S}$ contains all the vertices at the contact boundary $\Gamma_C$ this time. As there, what we need to do is keep a subset of degrees of freedom fixed, leading to additional constraints that one can write as a saddle point problem. However, as discussed in the paper, by writing these constraints in an appropriate way that removes the coupling between degrees of freedom, we end up with a set of nodes that essentially just have Dirichlet values attached to them.

    +

    This linearized problem to be solved in each Newton step is essentially like in step-41. The only difference consists in the fact that the contact area is at the boundary instead of in the domain. But this has no further consequence so that we refer to the documentation of step-41 with the only hint that $\mathcal{S}$ contains all the vertices at the contact boundary $\Gamma_C$ this time. As there, what we need to do is keep a subset of degrees of freedom fixed, leading to additional constraints that one can write as a saddle point problem. However, as discussed in the paper, by writing these constraints in an appropriate way that removes the coupling between degrees of freedom, we end up with a set of nodes that essentially just have Dirichlet values attached to them.

    Overall algorithm

    The algorithm outlined above combines the damped semismooth Newton-method, which we use for the nonlinear constitutive law, with the semismooth Newton method for the contact. It works as follows:

    1. -

      Initialize the active and inactive sets $\mathcal{A}_i$ and $\mathcal{F}_i$ such that $\mathcal{S} = \mathcal{A}_i \cup \mathcal{F}_i$ and $\mathcal{A}_i \cap
- \mathcal{F}_i = \emptyset$ and set $i = 1$. Here, $\mathcal{S}$ is the set of all degrees of freedom located at the surface of the domain where contact may happen. The start value $\hat U^0 \dealcoloneq
- P_{\mathcal{A}_k}(0)$ fulfills our obstacle condition, i.e., we project an initial zero displacement onto the set of feasible displacements.

      +

      Initialize the active and inactive sets $\mathcal{A}_i$ and $\mathcal{F}_i$ such that $\mathcal{S} = \mathcal{A}_i \cup \mathcal{F}_i$ and $\mathcal{A}_i \cap
+ \mathcal{F}_i = \emptyset$ and set $i = 1$. Here, $\mathcal{S}$ is the set of all degrees of freedom located at the surface of the domain where contact may happen. The start value $\hat U^0 \dealcoloneq
+ P_{\mathcal{A}_k}(0)$ fulfills our obstacle condition, i.e., we project an initial zero displacement onto the set of feasible displacements.

    2. -

      Assemble the Newton matrix $A_{pq} \dealcoloneq a'(
- U^{i-1};\varphi_p,\varphi_q)$ and the right-hand-side $F(\hat U^{i-1})$. These correspond to the linearized Newton step, ignoring for the moment the contact inequality.

      +

      Assemble the Newton matrix $A_{pq} \dealcoloneq a'(
+ U^{i-1};\varphi_p,\varphi_q)$ and the right-hand-side $F(\hat U^{i-1})$. These correspond to the linearized Newton step, ignoring for the moment the contact inequality.

    3. -

      Find the primal-dual pair $(\tilde U^i,\Lambda^i)$ that satisfies

      -\begin{align*}
+<p class=Find the primal-dual pair $(\tilde U^i,\Lambda^i)$ that satisfies

      +\begin{align*}
  A\tilde U^i + B\Lambda^i & = F, &\\
  \left[B^T\tilde U^i\right]_p & = G_p & \forall p\in\mathcal{A}_i,\\
  \Lambda^i_p & = 0 & \forall p\in\mathcal{F}_i.
- \end{align*} + \end{align*}" src="form_4887.png"/>

      -

      As in step-41, we can obtain the solution to this problem by eliminating those degrees of freedom in ${\cal A}_i$ from the first equation and obtain a linear system $\hat {\hat A}(U^{i-1}) \tilde U^i = \hat {\hat H}(U^{i-1})$.

      +

      As in step-41, we can obtain the solution to this problem by eliminating those degrees of freedom in ${\cal A}_i$ from the first equation and obtain a linear system $\hat {\hat A}(U^{i-1}) \tilde U^i = \hat {\hat H}(U^{i-1})$.

    4. -

      Damp the Newton iteration for $i>2$ by applying a line search and calculating a linear combination of $U^{i-1}$ and $\tilde U^i$. This requires finding an $\alpha^i_l \dealcoloneq 2^{-l},(l=0,\ldots,10)$ so that

      -\begin{gather*}U^i \dealcoloneq \alpha^i_l\bar U^i +
- (1-\alpha^i_l)U^{i-1}\end{gather*} +

      Damp the Newton iteration for $i>2$ by applying a line search and calculating a linear combination of $U^{i-1}$ and $\tilde U^i$. This requires finding an $\alpha^i_l \dealcoloneq 2^{-l},(l=0,\ldots,10)$ so that

      +\begin{gather*}U^i \dealcoloneq \alpha^i_l\bar U^i +
+ (1-\alpha^i_l)U^{i-1}\end{gather*}

      satisfies

      -\begin{gather*}
+<picture><source srcset=\begin{gather*}
    \vert {\hat R}\left({\mathbf u}^{i}\right) \vert < \vert {\hat R}\left({\mathbf u}^{i-1}\right) \vert.
- \end{gather*} + \end{gather*}" src="form_4895.png"/>

      -

      with ${\hat R}\left({\mathbf u}\right)=\left(P_{Pi}(C\varepsilon(u)),\varepsilon(\varphi^{i}_p\right)$ with the exceptions of (i) elements $p\in\mathcal{A}_i$ where we set ${\hat R}\left({\mathbf u}\right)=0$, and (ii) elements that correspond to hanging nodes, which we eliminate in the usual manner.

      +

      with ${\hat R}\left({\mathbf u}\right)=\left(P_{Pi}(C\varepsilon(u)),\varepsilon(\varphi^{i}_p\right)$ with the exceptions of (i) elements $p\in\mathcal{A}_i$ where we set ${\hat R}\left({\mathbf u}\right)=0$, and (ii) elements that correspond to hanging nodes, which we eliminate in the usual manner.

    5. Define the new active and inactive sets by

      -\begin{gather*}\mathcal{A}_{i+1} \dealcoloneq \lbrace p\in\mathcal{S}:\Lambda^i_p +
- c\left(\left[B^TU^i\right]_p - G_p\right) > 0\rbrace,\end{gather*} +\begin{gather*}\mathcal{A}_{i+1} \dealcoloneq \lbrace p\in\mathcal{S}:\Lambda^i_p +
/usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html	2024-03-17 21:57:46.543253445 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html	2024-03-17 21:57:46.551253494 +0000
@@ -177,37 +177,37 @@
 <p>Much inspiration for this program comes from <a class=step-31 but several of the techniques discussed here are original.

      Advection-dominated two-phase flow mathematical model

      We consider the flow of a two-phase immiscible, incompressible fluid. Capillary and gravity effects are neglected, and viscous effects are assumed dominant. The governing equations for such a flow that are identical to those used in step-21 and are

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf{u}_t &= - \mathbf{K} \lambda_t \left(S\right) \nabla p, \\
   \nabla \cdot \mathbf{u}_t &= q, \\
   \epsilon \frac{\partial S}{\partial t} + \nabla \cdot \left( \mathbf{u}_t  F\left( S \right) \right)&=0,
-\end{align*} +\end{align*}" src="form_4935.png"/>

      -

      where $S$ is the saturation (volume fraction between zero and one) of the second (wetting) phase, $p$ is the pressure, $\mathbf{K}$ is the permeability tensor, $\lambda_t$ is the total mobility, $\epsilon$ is the porosity, $F$ is the fractional flow of the wetting phase, $q$ is the source term and $\mathbf{u}_t$ is the total velocity. The total mobility, fractional flow of the wetting phase and total velocity are respectively given by

      -\begin{align*}
+<p> where <picture><source srcset=$S$ is the saturation (volume fraction between zero and one) of the second (wetting) phase, $p$ is the pressure, $\mathbf{K}$ is the permeability tensor, $\lambda_t$ is the total mobility, $\epsilon$ is the porosity, $F$ is the fractional flow of the wetting phase, $q$ is the source term and $\mathbf{u}_t$ is the total velocity. The total mobility, fractional flow of the wetting phase and total velocity are respectively given by

      +\begin{align*}
    \lambda_t(S)&= \lambda_w + \lambda_{nw} = \frac{k_{rw}(S)}{\mu_w} + \frac{k_{rnw}(S)}{\mu_{nw}}, \\
    F(S) &= \frac{\lambda_w}{\lambda_t} = \frac{\lambda_w}{\lambda_w + \lambda_{nw}} = \frac{k_{rw}(S)/\mu_w}{k_{rw}(S)/\mu_w + k_{rnw}(S)/\mu_{nw}}, \\
    \mathbf{u}_t &= \mathbf{u}_w + \mathbf{u}_{nw} = -\lambda_t(S)\mathbf{K} \cdot \nabla p,
-\end{align*} +\end{align*}" src="form_4939.png"/>

      -

      where subscripts $w, nw$ represent the wetting and non-wetting phases, respectively.

      -

      For convenience, the porosity $\epsilon$ in the saturation equation, which can be considered a scaling factor for the time variable, is set to one. Following a commonly used prescription for the dependence of the relative permeabilities $k_{rw}$ and $k_{rnw}$ on saturation, we use

      -\begin{align*}
+<p> where subscripts <picture><source srcset=$w, nw$ represent the wetting and non-wetting phases, respectively.

      +

      For convenience, the porosity $\epsilon$ in the saturation equation, which can be considered a scaling factor for the time variable, is set to one. Following a commonly used prescription for the dependence of the relative permeabilities $k_{rw}$ and $k_{rnw}$ on saturation, we use

      +\begin{align*}
    k_{rw}  &= S^2, \qquad&\qquad
    k_{rnw} &= \left( 1-S \right)^2.
-\end{align*} +\end{align*}" src="form_4943.png"/>

      The porous media equations above are augmented by initial conditions for the saturation and boundary conditions for the pressure. Since saturation and the gradient of the pressure uniquely determine the velocity, no boundary conditions are necessary for the velocity. Since the flow equations do not contain time derivatives, initial conditions for the velocity and pressure variables are not required. The flow field separates the boundary into inflow or outflow parts. Specifically,

      -\[
+<picture><source srcset=\[
    \mathbf{\Gamma}_{in}(t) = \left\{\mathbf{x} \in \partial \Omega:\mathbf{n} \cdot \mathbf{u}_t<0\right\},
-\] +\]" src="form_4944.png"/>

      -

      and we arrive at a complete model by also imposing boundary values for the saturation variable on the inflow boundary $\mathbf{\Gamma}_{in}$.

      +

      and we arrive at a complete model by also imposing boundary values for the saturation variable on the inflow boundary $\mathbf{\Gamma}_{in}$.

      Adaptive operator splitting and time stepping

      As seen in step-21, solving the flow equations for velocity and pressure are the parts of the program that take far longer than the (explicit) updating step for the saturation variable once we know the flow variables. On the other hand, the pressure and velocity depend only weakly on saturation, so one may think about only solving for pressure and velocity every few time steps while updating the saturation in every step. If we can find a criterion for when the flow variables need to be updated, we call this splitting an "adaptive operator splitting" scheme.

      Here, we use the following a posteriori criterion to decide when to re-compute pressure and velocity variables (detailed derivations and descriptions can be found in [Chueh2013]):

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   \theta(n,n_p)
   =
     \max_{\kappa\in{\mathbb T}}
@@ -217,49 +217,49 @@
       - \frac 1{\lambda_t\left(S^{(n_p)}\right)} \right\|_{L^\infty(\kappa)}
     \left\|\|\mathbf{K}^{-1}\|_1\right\|_{L^\infty(\kappa)}
     \right).
-\end{align*} +\end{align*}" src="form_4946.png"/>

      -

      where superscripts in parentheses denote the number of the saturation time step at which any quantity is defined and $n_p<n$ represents the last step where we actually computed the pressure and velocity. If $\theta(n,n_p)$ exceeds a certain threshold we re-compute the flow variables; otherwise, we skip this computation in time step $n$ and only move the saturation variable one time step forward.

      -

      In short, the algorithm allows us to perform a number of saturation time steps of length $\Delta t_c^{(n)}=t^{(n)}_c-t^{(n-1)}_c$ until the criterion above tells us to re-compute velocity and pressure variables, leading to a macro time step of length

      -\[
+<p> where superscripts in parentheses denote the number of the saturation time step at which any quantity is defined and <picture><source srcset=$n_p<n$ represents the last step where we actually computed the pressure and velocity. If $\theta(n,n_p)$ exceeds a certain threshold we re-compute the flow variables; otherwise, we skip this computation in time step $n$ and only move the saturation variable one time step forward.

      +

      In short, the algorithm allows us to perform a number of saturation time steps of length $\Delta t_c^{(n)}=t^{(n)}_c-t^{(n-1)}_c$ until the criterion above tells us to re-compute velocity and pressure variables, leading to a macro time step of length

      +\[
    \Delta t_p^{(n)} = \sum_{i=n_p+1}^{n} \Delta t_c^{(i)}.
-\] +\]" src="form_4950.png"/>

      We choose the length of (micro) steps subject to the Courant-Friedrichs-Lewy (CFL) restriction according to the criterion

      -\[
+<picture><source srcset=\[
   \Delta t_c = \frac{\textrm{min}_{K}h_{K}}{7 \|\mathbf{u}_t\|_{L^{\infty}\left(\Omega\right)}},
-\] +\]" src="form_4951.png"/>

      which we have confirmed to be stable for the choice of finite element and time stepping scheme for the saturation equation discussed below ( $h_K$ denotes the diameter of cell $K$). The result is a scheme where neither micro nor macro time steps are of uniform length, and both are chosen adaptively.

      Time discretization

      Using this time discretization, we obtain the following set of equations for each time step from the IMPES approach (see step-21):

      -\begin{align*}
+<picture><source srcset=\begin{align*}
    \mathbf{u}^{(n)}_t + \lambda_t\left(S^{(n-1)}\right) \mathbf{K} \nabla p^{(n)} =0, \\
    \nabla \cdot \mathbf{u}^{(n)}_t = q, \\
    \epsilon \left( \frac{S^{(n-1)}-S^{(n)}}{\Delta t^{(n)}_c} \right) + \mathbf{u}^{(n)}_t \cdot \nabla F\left(S^{(n-1)}\right) + F\left(S^{(n-1)}\right) \nabla \cdot \mathbf{u}^{(n)}_t =0.
-\end{align*} +\end{align*}" src="form_4952.png"/>

      -

      Using the fact that $\nabla \cdot \mathbf{u}_t = q$, the time discrete saturation equation becomes

      -\begin{align*}
+<p>Using the fact that <picture><source srcset=$\nabla \cdot \mathbf{u}_t = q$, the time discrete saturation equation becomes

      +\begin{align*}
   &\epsilon \left( \frac{S^{(n)}-S^{(n-1)}}{\Delta t^{(n)}_c} \right) + \mathbf{u}^{(n)}_t \cdot \nabla F\left(S^{(n-1)}\right) + F\left(S^{(n-1)}\right)q=0.
-\end{align*} +\end{align*}" src="form_4954.png"/>

      Weak form, space discretization for the pressure-velocity part

      -

      By multiplying the equations defining the total velocity $\mathbf u_t^{(n)}$ and the equation that expresses its divergence in terms of source terms, with test functions $\mathbf{v}$ and $w$ respectively and then integrating terms by parts as necessary, the weak form of the problem reads: Find $\mathbf u, p$ so that for all test functions $\mathbf{v}, w$ there holds

      -\begin{gather*}
+<p>By multiplying the equations defining the total velocity <picture><source srcset=$\mathbf u_t^{(n)}$ and the equation that expresses its divergence in terms of source terms, with test functions $\mathbf{v}$ and $w$ respectively and then integrating terms by parts as necessary, the weak form of the problem reads: Find $\mathbf u, p$ so that for all test functions $\mathbf{v}, w$ there holds

      +\begin{gather*}
    \left( \left( \mathbf{K} \lambda_t\left(S^{(n-1)}\right) \right)^{-1} \mathbf{u}^{(n)}_t, \mathbf{v}\right)_{\Omega} - \left(p^{(n)}, \nabla \cdot \mathbf{v}\right)_{\Omega} = -\left(p^{(n)}, \mathbf{n} \cdot \mathbf{v} \right)_{\partial \Omega}, \\
    - \left( \nabla \cdot \mathbf{u}^{(n)}_t,w\right)_{\Omega} = - \big(q,w\big)_{\Omega}.
-\end{gather*} +\end{gather*}" src="form_4958.png"/>

      -

      Here, $\mathbf{n}$ represents the unit outward normal vector to $\partial
-\Omega$ and the pressure $p^{(n)}$ can be prescribed weakly on the open part of the boundary $\partial \Omega$ whereas on those parts where a velocity is prescribed (for example impermeable boundaries with $\mathbf n \cdot \mathbf
-u=0$ the term disappears altogether because $\mathbf n \cdot \mathbf
-v=0$.

      -

      We use continuous finite elements to discretize the velocity and pressure equations. Specifically, we use mixed finite elements to ensure high order approximation for both vector (e.g. a fluid velocity) and scalar variables (e.g. pressure) simultaneously. For saddle point problems, it is well established that the so-called Babuska-Brezzi or Ladyzhenskaya-Babuska-Brezzi (LBB) conditions [BrezziFortin], [Chen2005] need to be satisfied to ensure stability of the pressure-velocity system. These stability conditions are satisfied in the present work by using elements for velocity that are one order higher than for the pressure, i.e. $u_h \in Q^d_{p+1}$ and $p_h \in Q_p$, where $p=1$, $d$ is the space dimension, and $Q_s$ denotes the space of tensor product Lagrange polynomials of degree $s$ in each variable.

      +

      Here, $\mathbf{n}$ represents the unit outward normal vector to $\partial
+\Omega$ and the pressure $p^{(n)}$ can be prescribed weakly on the open part of the boundary $\partial \Omega$ whereas on those parts where a velocity is prescribed (for example impermeable boundaries with $\mathbf n \cdot \mathbf
+u=0$ the term disappears altogether because $\mathbf n \cdot \mathbf
+v=0$.

      +

      We use continuous finite elements to discretize the velocity and pressure equations. Specifically, we use mixed finite elements to ensure high order approximation for both vector (e.g. a fluid velocity) and scalar variables (e.g. pressure) simultaneously. For saddle point problems, it is well established that the so-called Babuska-Brezzi or Ladyzhenskaya-Babuska-Brezzi (LBB) conditions [BrezziFortin], [Chen2005] need to be satisfied to ensure stability of the pressure-velocity system. These stability conditions are satisfied in the present work by using elements for velocity that are one order higher than for the pressure, i.e. $u_h \in Q^d_{p+1}$ and $p_h \in Q_p$, where $p=1$, $d$ is the space dimension, and $Q_s$ denotes the space of tensor product Lagrange polynomials of degree $s$ in each variable.

      Stabilization, weak form and space discretization for the saturation transport equation

      The chosen $Q_1$ elements for the saturation equation do not lead to a stable discretization without upwinding or other kinds of stabilization, and spurious oscillations will appear in the numerical solution. Adding an artificial diffusion term is one approach to eliminating these oscillations [Chen2005]. On the other hand, adding too much diffusion smears sharp fronts in the solution and suffers from grid-orientation difficulties [Chen2005]. To avoid these effects, we use the artificial diffusion term proposed by [GuermondPasquetti2008] and validated in [Chueh2013] and [KHB12], as well as in step-31.

      This method modifies the (discrete) weak form of the saturation equation to read

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   \left(\epsilon \frac{\partial S_h}{\partial t},\sigma_h\right)
   -
   \left(\mathbf{u}_t  F\left( S_h \right),
@@ -272,53 +272,53 @@
   &=0
   \qquad
   \forall \sigma_h,
-\end{align*} +\end{align*}" src="form_4966.png"/>

      -

      where $\nu$ is the artificial diffusion parameter and $\hat F$ is an appropriately chosen numerical flux on the boundary of the domain (we choose the obvious full upwind flux for this).

      -

      Following [GuermondPasquetti2008] (and as detailed in [Chueh2013]), we use the parameter as a piecewise constant function set on each cell $K$ with the diameter $h_{K}$ as

      -\[
+<p> where <picture><source srcset=$\nu$ is the artificial diffusion parameter and $\hat F$ is an appropriately chosen numerical flux on the boundary of the domain (we choose the obvious full upwind flux for this).

      +

      Following [GuermondPasquetti2008] (and as detailed in [Chueh2013]), we use the parameter as a piecewise constant function set on each cell $K$ with the diameter $h_{K}$ as

      +\[
    \nu(S_h)|_{K} = \beta \| \mathbf{u}_t \max\{F'(S_h),1\} \|_{L^{\infty}(K)} \textrm{min} \left\{ h_{K},h^{\alpha}_{K} \frac{\|\textrm{Res}(S_h)\|_{L^{\infty}(K)}}{c(\mathbf{u}_t,S)} \right\}
-\] +\]" src="form_4969.png"/>

      -

      where $\alpha$ is a stabilization exponent and $\beta$ is a dimensionless user-defined stabilization constant. Following [GuermondPasquetti2008] as well as the implementation in step-31, the velocity and saturation global normalization constant, $c(\mathbf{u}_t,S)$, and the residual $\textrm{Res}(S)$ are respectively given by

      -\[
+<p> where <picture><source srcset=$\alpha$ is a stabilization exponent and $\beta$ is a dimensionless user-defined stabilization constant. Following [GuermondPasquetti2008] as well as the implementation in step-31, the velocity and saturation global normalization constant, $c(\mathbf{u}_t,S)$, and the residual $\textrm{Res}(S)$ are respectively given by

      +\[
    c(\mathbf{u}_t,S) = c_R \|\mathbf{u}_t \max\{F'(S),1\}\|_{L^{\infty}(\Omega)} \textrm{var}(S)^\alpha | \textrm{diam} (\Omega) |^{\alpha - 2}
-\] +\]" src="form_4972.png"/>

      and

      -\[
+<picture><source srcset=\[
    \textrm{Res}(S) = \left( \epsilon \frac{\partial S}{\partial t} + \mathbf{u}_t \cdot \nabla F(S) + F(S)q \right) \cdot S^{\alpha - 1}
-\] +\]" src="form_4973.png"/>

      -

      where $c_R$ is a second dimensionless user-defined constant, $\textrm{diam}(\Omega)$ is the diameter of the domain and $\textrm{var}(S) =
-\textrm{max}_{\Omega} S - \textrm{min}_{\Omega} S$ is the range of the present saturation values in the entire computational domain $\Omega$.

      -

      This stabilization scheme has a number of advantages over simpler schemes such as finite volume (or discontinuous Galerkin) methods or streamline upwind Petrov Galerkin (SUPG) discretizations. In particular, the artificial diffusion term acts primarily in the vicinity of discontinuities since the residual is small in areas where the saturation is smooth. It therefore provides for a higher degree of accuracy. On the other hand, it is nonlinear since $\nu$ depends on the saturation $S$. We avoid this difficulty by treating all nonlinear terms explicitly, which leads to the following fully discrete problem at time step $n$:

      -\begin{align*}
+<p> where <picture><source srcset=$c_R$ is a second dimensionless user-defined constant, $\textrm{diam}(\Omega)$ is the diameter of the domain and $\textrm{var}(S) =
+\textrm{max}_{\Omega} S - \textrm{min}_{\Omega} S$ is the range of the present saturation values in the entire computational domain $\Omega$.

      +

      This stabilization scheme has a number of advantages over simpler schemes such as finite volume (or discontinuous Galerkin) methods or streamline upwind Petrov Galerkin (SUPG) discretizations. In particular, the artificial diffusion term acts primarily in the vicinity of discontinuities since the residual is small in areas where the saturation is smooth. It therefore provides for a higher degree of accuracy. On the other hand, it is nonlinear since $\nu$ depends on the saturation $S$. We avoid this difficulty by treating all nonlinear terms explicitly, which leads to the following fully discrete problem at time step $n$:

      +\begin{align*}
    &\left( \epsilon S_h^{(n)},\sigma_h\right)_{\Omega} - \Delta t^{(n)}_c \Big(F\left(S_h^{(n-1)}\right)\mathbf{u}^{*}_t,\nabla\sigma_h\Big)_{\Omega} + \Delta t^{(n)}_c \Big(F\left(S_h^{(n-1)}\right)\left(\mathbf{n}\cdot\mathbf{u}^{*}_t\right),\sigma_h\Big)_{\partial\Omega} \nonumber \\
    & \quad = \left( \epsilon S_h^{(n-1)},\sigma_h\right)_{\Omega} - \Delta t^{(n)}_c \bigg(\nu\left(S_h^{(n-1)}\right)\nabla S_h^{(n-1)},\nabla\sigma_h\bigg)_{\Omega} \nonumber \\
    & \qquad + \Delta t^{(n)}_c \bigg(\mathbf{n}\cdot\nu\left(S_h^{(n-1)}\right)\nabla S^{(n-1)},\sigma_h\bigg)_{\partial\Omega}
-\end{align*} +\end{align*}" src="form_4976.png"/>

      -

      where $\mathbf{u}_t^{*}$ is the velocity linearly extrapolated from $\mathbf{u}^{(n_p)}_t$ and $\mathbf{u}^{(n_{pp})}_t$ to the current time $t^{(n)}$ if $\theta<\theta^*$ while $\mathbf{u}_t^{*}$ is $\mathbf{u}^{(n_p)}_t$ if $\theta>\theta^*$. Consequently, the equation is linear in $S_h^{(n)}$ and all that is required is to solve with a mass matrix on the saturation space.

      +

      where $\mathbf{u}_t^{*}$ is the velocity linearly extrapolated from $\mathbf{u}^{(n_p)}_t$ and $\mathbf{u}^{(n_{pp})}_t$ to the current time $t^{(n)}$ if $\theta<\theta^*$ while $\mathbf{u}_t^{*}$ is $\mathbf{u}^{(n_p)}_t$ if $\theta>\theta^*$. Consequently, the equation is linear in $S_h^{(n)}$ and all that is required is to solve with a mass matrix on the saturation space.

      Since the Dirichlet boundary conditions for saturation are only imposed on the inflow boundaries, the third term on the left hand side of the equation above needs to be split further into two parts:

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   &\Delta t^{(n)}_c \Big(F\left(S_h^{(n-1)}\right)\left(\mathbf{n}\cdot\mathbf{u}^{(n)}_t\right),\sigma_h\Big)_{\partial\Omega} \nonumber \\
   &\qquad= \Delta t^{(n)}_c \Big(F\left(S^{(n-1)}_{(+)}\right)\left(\mathbf{n}\cdot\mathbf{u}^{(n)}_{t(+)}\right),\sigma_h\Big)_{\partial\Omega_{(+)}} + \Delta t^{(n)}_c \Big(F\left(S^{(n-1)}_{(-)}\right)\left(\mathbf{n}\cdot\mathbf{u}^{(n)}_{t(-)}\right),\sigma_h\Big)_{\partial\Omega_{(-)}}
-\end{align*} +\end{align*}" src="form_4984.png"/>

      -

      where $\partial\Omega_{(-)} = \left\{\mathbf{x} \in \partial\Omega : \mathbf{n}
-  \cdot \mathbf{u}_t<0\right\}$ and $\partial\Omega_{(+)} = \left\{\mathbf{x} \in \partial\Omega : \mathbf{n} \cdot
-  \mathbf{u}_t>0\right\}$ represent inflow and outflow boundaries, respectively. We choose values using an upwind formulation, i.e. $S^{(n-1)}_{(+)}$ and $\mathbf{u}^{(n)}_{t(+)}$ correspond to the values taken from the present cell, while the values of $S^{(n-1)}_{(-)}$ and $\mathbf{u}^{(n)}_{t(-)}$ are those taken from the neighboring boundary $\partial\Omega_{(-)}$.

      +

      where $\partial\Omega_{(-)} = \left\{\mathbf{x} \in \partial\Omega : \mathbf{n}
+  \cdot \mathbf{u}_t<0\right\}$ and $\partial\Omega_{(+)} = \left\{\mathbf{x} \in \partial\Omega : \mathbf{n} \cdot
+  \mathbf{u}_t>0\right\}$ represent inflow and outflow boundaries, respectively. We choose values using an upwind formulation, i.e. $S^{(n-1)}_{(+)}$ and $\mathbf{u}^{(n)}_{t(+)}$ correspond to the values taken from the present cell, while the values of $S^{(n-1)}_{(-)}$ and $\mathbf{u}^{(n)}_{t(-)}$ are those taken from the neighboring boundary $\partial\Omega_{(-)}$.

      Adaptive mesh refinement

      Choosing meshes adaptively to resolve sharp saturation fronts is an essential ingredient to achieve efficiency in our algorithm. Here, we use the same shock-type refinement approach used in [Chueh2013] to select those cells that should be refined or coarsened. The refinement indicator for each cell $K$ of the triangulation is computed by

      -\[
+<picture><source srcset=\[
    \eta_{K} = |\nabla S_h(\mathbf x_K)|
-\] +\]" src="form_4992.png"/>

      -

      where $\nabla S_h(\mathbf x_K)$ is the gradient of the discrete saturation variable evaluated at the center $\mathbf x_K$ of cell $K$. This approach is analogous to ones frequently used in compressible flow problems, where density gradients are used to indicate refinement. That said, as we will discuss at the end of the results section, this turns out to not be a very useful criterion since it leads to refinement basically everywhere. We only show it here for illustrative purposes.

      +

      where $\nabla S_h(\mathbf x_K)$ is the gradient of the discrete saturation variable evaluated at the center $\mathbf x_K$ of cell $K$. This approach is analogous to ones frequently used in compressible flow problems, where density gradients are used to indicate refinement. That said, as we will discuss at the end of the results section, this turns out to not be a very useful criterion since it leads to refinement basically everywhere. We only show it here for illustrative purposes.

      The linear system and its preconditioning

      -

      Following the discretization of the governing equations discussed above, we obtain a linear system of equations in time step $(n)$ of the following form:

      -\[
+<p>Following the discretization of the governing equations discussed above, we obtain a linear system of equations in time step <picture><source srcset=$(n)$ of the following form:

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html 2024-03-17 21:57:46.707254457 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html 2024-03-17 21:57:46.711254482 +0000 @@ -192,143 +192,143 @@

    Notation

    One can think of fourth-order tensors as linear operators mapping second-order tensors (matrices) onto themselves in much the same way as matrices map vectors onto vectors. There are various fourth-order unit tensors that will be required in the forthcoming presentation. The fourth-order unit tensors $\mathcal{I}$ and $\overline{\mathcal{I}}$ are defined by

    -\[
+<picture><source srcset=\[
         \mathbf{A} = \mathcal{I}:\mathbf{A}
                 \qquad \text{and} \qquad
         \mathbf{A}^T = \overline{\mathcal{I}}:\mathbf{A} \, .
-\] +\]" src="form_5040.png"/>

    Note $\mathcal{I} \neq \overline{\mathcal{I}}^T$. Furthermore, we define the symmetric and skew-symmetric fourth-order unit tensors by

    -\[
+<picture><source srcset=\[
         \mathcal{S} \dealcoloneq \dfrac{1}{2}[\mathcal{I} + \overline{\mathcal{I}}]
                 \qquad \text{and} \qquad
         \mathcal{W} \dealcoloneq \dfrac{1}{2}[\mathcal{I} - \overline{\mathcal{I}}] \, ,
-\] +\]" src="form_5041.png"/>

    such that

    -\[
+<picture><source srcset=\[
         \dfrac{1}{2}[\mathbf{A} + \mathbf{A}^T] = \mathcal{S}:\mathbf{A}
                 \qquad \text{and} \qquad
         \dfrac{1}{2}[\mathbf{A} - \mathbf{A}^T] = \mathcal{W}:\mathbf{A} \, .
-\] +\]" src="form_5042.png"/>

    The fourth-order SymmetricTensor returned by identity_tensor() is $\mathcal{S}$.

    Kinematics

    -

    Let the time domain be denoted $\mathbb{T} = [0,T_{\textrm{end}}]$, where $t \in \mathbb{T}$ and $T_{\textrm{end}}$ is the total problem duration. Consider a continuum body that occupies the reference configuration $\Omega_0$ at time $t=0$. Particles in the reference configuration are identified by the position vector $\mathbf{X}$. The configuration of the body at a later time $t>0$ is termed the current configuration, denoted $\Omega$, with particles identified by the vector $\mathbf{x}$. The nonlinear map between the reference and current configurations, denoted $\boldsymbol{\varphi}$, acts as follows:

    -\[
+<p>Let the time domain be denoted <picture><source srcset=$\mathbb{T} = [0,T_{\textrm{end}}]$, where $t \in \mathbb{T}$ and $T_{\textrm{end}}$ is the total problem duration. Consider a continuum body that occupies the reference configuration $\Omega_0$ at time $t=0$. Particles in the reference configuration are identified by the position vector $\mathbf{X}$. The configuration of the body at a later time $t>0$ is termed the current configuration, denoted $\Omega$, with particles identified by the vector $\mathbf{x}$. The nonlinear map between the reference and current configurations, denoted $\boldsymbol{\varphi}$, acts as follows:

    +\[
         \mathbf{x} = \boldsymbol{\varphi}(\mathbf{X},t) \, .
-\] +\]" src="form_5048.png"/>

    The material description of the displacement of a particle is defined by

    -\[
+<picture><source srcset=\[
         \mathbf{U}(\mathbf{X},t) = \mathbf{x}(\mathbf{X},t) - \mathbf{X} \, .
-\] +\]" src="form_5049.png"/>

    The deformation gradient $\mathbf{F}$ is defined as the material gradient of the motion:

    -\[
+<picture><source srcset=\[
         \mathbf{F}(\mathbf{X},t)
                 \dealcoloneq \dfrac{\partial \boldsymbol{\varphi}(\mathbf{X},t)}{\partial \mathbf{X}}
                 = \textrm{Grad}\ \mathbf{x}(\mathbf{X},t)
                 = \mathbf{I} + \textrm{Grad}\ \mathbf{U} \, .
-\] +\]" src="form_5050.png"/>

    -

    The determinant of the of the deformation gradient $J(\mathbf{X},t) \dealcoloneq \textrm{det}\ \mathbf{F}(\mathbf{X},t) > 0$ maps corresponding volume elements in the reference and current configurations, denoted $\textrm{d}V$ and $\textrm{d}v$, respectively, as

    -\[
+<p> The determinant of the of the deformation gradient <picture><source srcset=$J(\mathbf{X},t) \dealcoloneq \textrm{det}\ \mathbf{F}(\mathbf{X},t) > 0$ maps corresponding volume elements in the reference and current configurations, denoted $\textrm{d}V$ and $\textrm{d}v$, respectively, as

    +\[
         \textrm{d}v = J(\mathbf{X},t)\; \textrm{d}V \, .
-\] +\]" src="form_5054.png"/>

    -

    Two important measures of the deformation in terms of the spatial and material coordinates are the left and right Cauchy-Green tensors, respectively, and denoted $\mathbf{b} \dealcoloneq \mathbf{F}\mathbf{F}^T$ and $\mathbf{C} \dealcoloneq \mathbf{F}^T\mathbf{F}$. They are both symmetric and positive definite.

    +

    Two important measures of the deformation in terms of the spatial and material coordinates are the left and right Cauchy-Green tensors, respectively, and denoted $\mathbf{b} \dealcoloneq \mathbf{F}\mathbf{F}^T$ and $\mathbf{C} \dealcoloneq \mathbf{F}^T\mathbf{F}$. They are both symmetric and positive definite.

    The Green-Lagrange strain tensor is defined by

    -\[
+<picture><source srcset=\[
         \mathbf{E} \dealcoloneq \frac{1}{2}[\mathbf{C} - \mathbf{I} ]
                 = \underbrace{\frac{1}{2}[\textrm{Grad}^T \mathbf{U} +  \textrm{Grad}\mathbf{U}]}_{\boldsymbol{\varepsilon}}
                         + \frac{1}{2}[\textrm{Grad}^T\ \mathbf{U}][\textrm{Grad}\ \mathbf{U}] \, .
-\] +\]" src="form_5057.png"/>

    If the assumption of infinitesimal deformations is made, then the second term on the right can be neglected, and $\boldsymbol{\varepsilon}$ (the linearised strain tensor) is the only component of the strain tensor. This assumption is, looking at the setup of the problem, not valid in step-18, making the use of the linearized $\boldsymbol{\varepsilon}$ as the strain measure in that tutorial program questionable.

    -

    In order to handle the different response that materials exhibit when subjected to bulk and shear type deformations we consider the following decomposition of the deformation gradient $\mathbf{F}$ and the left Cauchy-Green tensor $\mathbf{b}$ into volume-changing (volumetric) and volume-preserving (isochoric) parts:

    -\[
+<p>In order to handle the different response that materials exhibit when subjected to bulk and shear type deformations we consider the following decomposition of the deformation gradient <picture><source srcset=$\mathbf{F}$ and the left Cauchy-Green tensor $\mathbf{b}$ into volume-changing (volumetric) and volume-preserving (isochoric) parts:

    +\[
         \mathbf{F}
                 = (J^{1/3}\mathbf{I})\overline{\mathbf{F}}
         \qquad \text{and} \qquad
         \mathbf{b}
         = (J^{2/3}\mathbf{I})\overline{\mathbf{F}}\,\overline{\mathbf{F}}^T
                 =  (J^{2/3}\mathbf{I})\overline{\mathbf{b}} \, .
-\] +\]" src="form_5058.png"/>

    -

    Clearly, $\textrm{det}\ \mathbf{F} = \textrm{det}\ (J^{1/3}\mathbf{I}) = J$.

    -

    The spatial velocity field is denoted $\mathbf{v}(\mathbf{x},t)$. The derivative of the spatial velocity field with respect to the spatial coordinates gives the spatial velocity gradient $\mathbf{l}(\mathbf{x},t)$, that is

    -\[
+<p> Clearly, <picture><source srcset=$\textrm{det}\ \mathbf{F} = \textrm{det}\ (J^{1/3}\mathbf{I}) = J$.

    +

    The spatial velocity field is denoted $\mathbf{v}(\mathbf{x},t)$. The derivative of the spatial velocity field with respect to the spatial coordinates gives the spatial velocity gradient $\mathbf{l}(\mathbf{x},t)$, that is

    +\[
         \mathbf{l}(\mathbf{x},t)
                 \dealcoloneq \dfrac{\partial \mathbf{v}(\mathbf{x},t)}{\partial \mathbf{x}}
                 = \textrm{grad}\ \mathbf{v}(\mathbf{x},t) \, ,
-\] +\]" src="form_5062.png"/>

    -

    where $\textrm{grad} \{\bullet \}
+<p> where <picture><source srcset=$\textrm{grad} \{\bullet \}
 = \frac{\partial \{ \bullet \} }{ \partial \mathbf{x}}
 = \frac{\partial \{ \bullet \} }{ \partial \mathbf{X}}\frac{\partial \mathbf{X} }{ \partial \mathbf{x}}
-= \textrm{Grad} \{ \bullet \} \mathbf{F}^{-1}$.

    += \textrm{Grad} \{ \bullet \} \mathbf{F}^{-1}$" src="form_5063.png"/>.

    Kinetics

    -

    Cauchy's stress theorem equates the Cauchy traction $\mathbf{t}$ acting on an infinitesimal surface element in the current configuration $\mathrm{d}a$ to the product of the Cauchy stress tensor $\boldsymbol{\sigma}$ (a spatial quantity) and the outward unit normal to the surface $\mathbf{n}$ as

    -\[
+<p>Cauchy's stress theorem equates the Cauchy traction <picture><source srcset=$\mathbf{t}$ acting on an infinitesimal surface element in the current configuration $\mathrm{d}a$ to the product of the Cauchy stress tensor $\boldsymbol{\sigma}$ (a spatial quantity) and the outward unit normal to the surface $\mathbf{n}$ as

    +\[
         \mathbf{t}(\mathbf{x},t, \mathbf{n}) = \boldsymbol{\sigma}\mathbf{n} \, .
-\] +\]" src="form_5065.png"/>

    -

    The Cauchy stress is symmetric. Similarly, the first Piola-Kirchhoff traction $\mathbf{T}$ which acts on an infinitesimal surface element in the reference configuration $\mathrm{d}A$ is the product of the first Piola-Kirchhoff stress tensor $\mathbf{P}$ (a two-point tensor) and the outward unit normal to the surface $\mathbf{N}$ as

    -\[
+<p> The Cauchy stress is symmetric. Similarly, the first Piola-Kirchhoff traction <picture><source srcset=$\mathbf{T}$ which acts on an infinitesimal surface element in the reference configuration $\mathrm{d}A$ is the product of the first Piola-Kirchhoff stress tensor $\mathbf{P}$ (a two-point tensor) and the outward unit normal to the surface $\mathbf{N}$ as

    +\[
         \mathbf{T}(\mathbf{X},t, \mathbf{N}) = \mathbf{P}\mathbf{N} \, .
-\] +\]" src="form_5068.png"/>

    -

    The Cauchy traction $\mathbf{t}$ and the first Piola-Kirchhoff traction $\mathbf{T}$ are related as

    -\[
+<p> The Cauchy traction <picture><source srcset=$\mathbf{t}$ and the first Piola-Kirchhoff traction $\mathbf{T}$ are related as

    +\[
         \mathbf{t}\mathrm{d}a = \mathbf{T}\mathrm{d}A \, .
-\] +\]" src="form_5069.png"/>

    This can be demonstrated using Nanson's formula.

    The first Piola-Kirchhoff stress tensor is related to the Cauchy stress as

    -\[
+<picture><source srcset=\[
         \mathbf{P} = J \boldsymbol{\sigma}\mathbf{F}^{-T} \, .
-\] +\]" src="form_5070.png"/>

    -

    Further important stress measures are the (spatial) Kirchhoff stress $\boldsymbol{\tau} = J \boldsymbol{\sigma}$ and the (referential) second Piola-Kirchhoff stress $\mathbf{S} = {\mathbf{F}}^{-1} \boldsymbol{\tau} {\mathbf{F}}^{-T}$.

    +

    Further important stress measures are the (spatial) Kirchhoff stress $\boldsymbol{\tau} = J \boldsymbol{\sigma}$ and the (referential) second Piola-Kirchhoff stress $\mathbf{S} = {\mathbf{F}}^{-1} \boldsymbol{\tau} {\mathbf{F}}^{-T}$.

    Push-forward and pull-back operators

    Push-forward and pull-back operators allow one to transform various measures between the material and spatial settings. The stress measures used here are contravariant, while the strain measures are covariant.

    -

    The push-forward and-pull back operations for second-order covariant tensors $(\bullet)^{\text{cov}}$ are respectively given by:

    -\[
+<p>The push-forward and-pull back operations for second-order covariant tensors <picture><source srcset=$(\bullet)^{\text{cov}}$ are respectively given by:

    +\[
         \chi_{*}(\bullet)^{\text{cov}} \dealcoloneq \mathbf{F}^{-T} (\bullet)^{\text{cov}} \mathbf{F}^{-1}
         \qquad \text{and} \qquad
         \chi^{-1}_{*}(\bullet)^{\text{cov}} \dealcoloneq \mathbf{F}^{T} (\bullet)^{\text{cov}} \mathbf{F} \, .
-\] +\]" src="form_5074.png"/>

    -

    The push-forward and pull back operations for second-order contravariant tensors $(\bullet)^{\text{con}}$ are respectively given by:

    -\[
+<p>The push-forward and pull back operations for second-order contravariant tensors <picture><source srcset=$(\bullet)^{\text{con}}$ are respectively given by:

    +\[
         \chi_{*}(\bullet)^{\text{con}} \dealcoloneq \mathbf{F} (\bullet)^{\text{con}} \mathbf{F}^T
         \qquad \text{and} \qquad
         \chi^{-1}_{*}(\bullet)^{\text{con}} \dealcoloneq \mathbf{F}^{-1} (\bullet)^{\text{con}} \mathbf{F}^{-T} \, .
-\] +\]" src="form_5076.png"/>

    -

    For example $\boldsymbol{\tau} = \chi_{*}(\mathbf{S})$.

    +

    For example $\boldsymbol{\tau} = \chi_{*}(\mathbf{S})$.

    Hyperelastic materials

    -

    A hyperelastic material response is governed by a Helmholtz free energy function $\Psi = \Psi(\mathbf{F}) = \Psi(\mathbf{C}) = \Psi(\mathbf{b})$ which serves as a potential for the stress. For example, if the Helmholtz free energy depends on the right Cauchy-Green tensor $\mathbf{C}$ then the isotropic hyperelastic response is

    -\[
+<p>A hyperelastic material response is governed by a Helmholtz free energy function <picture><source srcset=$\Psi = \Psi(\mathbf{F}) = \Psi(\mathbf{C}) = \Psi(\mathbf{b})$ which serves as a potential for the stress. For example, if the Helmholtz free energy depends on the right Cauchy-Green tensor $\mathbf{C}$ then the isotropic hyperelastic response is

    +\[
         \mathbf{S}
                 = 2 \dfrac{\partial \Psi(\mathbf{C})}{\partial \mathbf{C}} \, .
-\] +\]" src="form_5079.png"/>

    -

    If the Helmholtz free energy depends on the left Cauchy-Green tensor $\mathbf{b}$ then the isotropic hyperelastic response is

    -\[
+<p> If the Helmholtz free energy depends on the left Cauchy-Green tensor <picture><source srcset=$\mathbf{b}$ then the isotropic hyperelastic response is

    +\[
         \boldsymbol{\tau}
                 = 2 \dfrac{\partial \Psi(\mathbf{b})}{\partial \mathbf{b}} \mathbf{b}
                 =  2 \mathbf{b} \dfrac{\partial \Psi(\mathbf{b})}{\partial \mathbf{b}} \, .
-\] +\]" src="form_5080.png"/>

    Following the multiplicative decomposition of the deformation gradient, the Helmholtz free energy can be decomposed as

    -\[
+<picture><source srcset=\[
         \Psi(\mathbf{b}) = \Psi_{\text{vol}}(J) + \Psi_{\text{iso}}(\overline{\mathbf{b}}) \, .
-\] +\]" src="form_5081.png"/>

    -

    Similarly, the Kirchhoff stress can be decomposed into volumetric and isochoric parts as $\boldsymbol{\tau} = \boldsymbol{\tau}_{\text{vol}} + \boldsymbol{\tau}_{\text{iso}}$ where:

    -\begin{align*}
+<p> Similarly, the Kirchhoff stress can be decomposed into volumetric and isochoric parts as <picture><source srcset=$\boldsymbol{\tau} = \boldsymbol{\tau}_{\text{vol}} + \boldsymbol{\tau}_{\text{iso}}$ where:

    +\begin{align*}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html	2024-03-17 21:57:46.775254878 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html	2024-03-17 21:57:46.779254902 +0000
@@ -132,14 +132,14 @@
 <div class= first_vector_components = <default value>);

    void collect_periodic_faces(const MeshType &mesh, const types::boundary_id b_id1, const types::boundary_id b_id2, const unsigned int direction, std::vector< PeriodicFacePair< typename MeshType::cell_iterator > > &matched_pairs, const Tensor< 1, MeshType::space_dimension > &offset=::Tensor< 1, MeshType::space_dimension >(), const FullMatrix< double > &matrix=FullMatrix< double >())

    This call loops over all faces of the container dof_handler on the periodic boundaries with boundary indicator b_id1 and b_id2, respectively. (You can assign these boundary indicators by hand after creating the coarse mesh, see Boundary indicator. Alternatively, you can also let many of the functions in namespace GridGenerator do this for if you specify the "colorize" flag; in that case, these functions will assign different boundary indicators to different parts of the boundary, with the details typically spelled out in the documentation of these functions.)

    -

    Concretely, if $\text{vertices}_{1/2}$ are the vertices of two faces $\text{face}_{1/2}$, then the function call above will match pairs of faces (and dofs) such that the difference between $\text{vertices}_2$ and $matrix\cdot \text{vertices}_1+\text{offset}$ vanishes in every component apart from direction and stores the resulting pairs with associated data in matched_pairs. (See GridTools::orthogonal_equality() for detailed information about the matching process.)

    -

    Consider, for example, the colored unit square $\Omega=[0,1]^2$ with boundary indicator 0 on the left, 1 on the right, 2 on the bottom and 3 on the top faces. (See the documentation of GridGenerator::hyper_cube() for this convention on how boundary indicators are assigned.) Then,

    GridTools::collect_periodic_faces(dof_handler,
    +

    Concretely, if $\text{vertices}_{1/2}$ are the vertices of two faces $\text{face}_{1/2}$, then the function call above will match pairs of faces (and dofs) such that the difference between $\text{vertices}_2$ and $matrix\cdot \text{vertices}_1+\text{offset}$ vanishes in every component apart from direction and stores the resulting pairs with associated data in matched_pairs. (See GridTools::orthogonal_equality() for detailed information about the matching process.)

    +

    Consider, for example, the colored unit square $\Omega=[0,1]^2$ with boundary indicator 0 on the left, 1 on the right, 2 on the bottom and 3 on the top faces. (See the documentation of GridGenerator::hyper_cube() for this convention on how boundary indicators are assigned.) Then,

    /*b_id1*/ 0,
    /*b_id2*/ 1,
    /*direction*/ 0,
    matched_pairs);
    -

    would yield periodicity constraints such that $u(0,y)=u(1,y)$ for all $y\in[0,1]$.

    -

    If we instead consider the parallelogram given by the convex hull of $(0,0)$, $(1,1)$, $(1,2)$, $(0,1)$ we can achieve the constraints $u(0,y)=u(1,y+1)$ by specifying an offset:

    +

    would yield periodicity constraints such that $u(0,y)=u(1,y)$ for all $y\in[0,1]$.

    +

    If we instead consider the parallelogram given by the convex hull of $(0,0)$, $(1,1)$, $(1,2)$, $(0,1)$ we can achieve the constraints $u(0,y)=u(1,y+1)$ by specifying an offset:

    /*b_id1*/ 0,
    /*b_id2*/ 1,
    /*direction*/ 0,
    @@ -169,18 +169,18 @@

    Here, we need to specify the orientation of the two faces using face_orientation, face_flip and face_orientation. For a closer description have a look at the documentation of DoFTools::make_periodicity_constraints. The remaining parameters are the same as for the high level interface apart from the self-explaining component_mask and affine_constraints.

    A practical example

    In the following, we show how to use the above functions in a more involved example. The task is to enforce rotated periodicity constraints for the velocity component of a Stokes flow.

    -

    On a quarter-circle defined by $\Omega=\{{\bf x}\in(0,1)^2:\|{\bf x}\|\in (0.5,1)\}$ we are going to solve the Stokes problem

    -\begin{eqnarray*}
+<p>On a quarter-circle defined by <picture><source srcset=$\Omega=\{{\bf x}\in(0,1)^2:\|{\bf x}\|\in (0.5,1)\}$ we are going to solve the Stokes problem

    +\begin{eqnarray*}
   -\Delta \; \textbf{u} + \nabla p &=& (\exp(-100\|{\bf x}-(.75,0.1)^T\|^2),0)^T, \\
   -\textrm{div}\;  \textbf{u}&=&0,\\
   \textbf{u}|_{\Gamma_1}&=&{\bf 0},
-\end{eqnarray*} +\end{eqnarray*}" src="form_5286.png"/>

    -

    where the boundary $\Gamma_1$ is defined as $\Gamma_1 \dealcoloneq \{x\in \partial\Omega: \|x\|\in\{0.5,1\}\}$. For the remaining parts of the boundary we are going to use periodic boundary conditions, i.e.

    -\begin{align*}
+<p> where the boundary <picture><source srcset=$\Gamma_1$ is defined as $\Gamma_1 \dealcoloneq \{x\in \partial\Omega: \|x\|\in\{0.5,1\}\}$. For the remaining parts of the boundary we are going to use periodic boundary conditions, i.e.

    +\begin{align*}
   u_x(0,\nu)&=-u_y(\nu,0)&\nu&\in[0,1]\\
   u_y(0,\nu)&=u_x(\nu,0)&\nu&\in[0,1].
-\end{align*} +\end{align*}" src="form_5288.png"/>

    The mesh will be generated by GridGenerator::quarter_hyper_shell(), which also documents how it assigns boundary indicators to its various boundaries if its colorize argument is set to true.

    The commented program

    @@ -205,15 +205,15 @@
    Point< 3 > center
    void quarter_hyper_shell(Triangulation< dim > &tria, const Point< dim > &center, const double inner_radius, const double outer_radius, const unsigned int n_cells=0, const bool colorize=false)
    const ::parallel::distributed::Triangulation< dim, spacedim > * triangulation
    -

    Before we can prescribe periodicity constraints, we need to ensure that cells on opposite sides of the domain but connected by periodic faces are part of the ghost layer if one of them is stored on the local processor. At this point we need to think about how we want to prescribe periodicity. The vertices $\text{vertices}_2$ of a face on the left boundary should be matched to the vertices $\text{vertices}_1$ of a face on the lower boundary given by $\text{vertices}_2=R\cdot
-   \text{vertices}_1+b$ where the rotation matrix $R$ and the offset $b$ are given by

    -\begin{align*}
+</div><!-- fragment --><p>Before we can prescribe periodicity constraints, we need to ensure that cells on opposite sides of the domain but connected by periodic faces are part of the ghost layer if one of them is stored on the local processor. At this point we need to think about how we want to prescribe periodicity. The vertices <picture><source srcset=$\text{vertices}_2$ of a face on the left boundary should be matched to the vertices $\text{vertices}_1$ of a face on the lower boundary given by $\text{vertices}_2=R\cdot
+   \text{vertices}_1+b$ where the rotation matrix $R$ and the offset $b$ are given by

    +\begin{align*}
    R=\begin{pmatrix}
    0&1\\-1&0
    \end{pmatrix},
    \quad
    b=\begin{pmatrix}0&0\end{pmatrix}.
-   \end{align*} + \end{align*}" src="form_5291.png"/>

    The data structure we are saving the resulting information into is here based on the Triangulation.

      std::vector<GridTools::PeriodicFacePair<
    @@ -295,23 +295,23 @@
    std::vector< types::global_dof_index > count_dofs_per_fe_block(const DoFHandler< dim, spacedim > &dof, const std::vector< unsigned int > &target_block=std::vector< unsigned int >())
    void interpolate_boundary_values(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > * > &function_map, std::map< types::global_dof_index, number > &boundary_values, const ComponentMask &component_mask=ComponentMask())
    -

    After we provided the mesh with the necessary information for the periodicity constraints, we are now able to actual create them. For describing the matching we are using the same approach as before, i.e., the $\text{vertices}_2$ of a face on the left boundary should be matched to the vertices $\text{vertices}_1$ of a face on the lower boundary given by $\text{vertices}_2=R\cdot \text{vertices}_1+b$ where the rotation matrix $R$ and the offset $b$ are given by

    -\begin{align*}
+</div><!-- fragment --><p>After we provided the mesh with the necessary information for the periodicity constraints, we are now able to actual create them. For describing the matching we are using the same approach as before, i.e., the <picture><source srcset=$\text{vertices}_2$ of a face on the left boundary should be matched to the vertices $\text{vertices}_1$ of a face on the lower boundary given by $\text{vertices}_2=R\cdot \text{vertices}_1+b$ where the rotation matrix $R$ and the offset $b$ are given by

    +\begin{align*}
    R=\begin{pmatrix}
    0&1\\-1&0
    \end{pmatrix},
    \quad
    b=\begin{pmatrix}0&0\end{pmatrix}.
-   \end{align*} + \end{align*}" src="form_5291.png"/>

    -

    These two objects not only describe how faces should be matched but also in which sense the solution should be transformed from $\text{face}_2$ to $\text{face}_1$.

    +

    These two objects not only describe how faces should be matched but also in which sense the solution should be transformed from $\text{face}_2$ to $\text{face}_1$.

      FullMatrix<double> rotation_matrix(dim);
      rotation_matrix[0][1] = 1.;
      rotation_matrix[1][0] = -1.;
     
      Tensor<1, dim> offset;
     
    -

    For setting up the constraints, we first store the periodicity information in an auxiliary object of type std::vector<GridTools::PeriodicFacePair<typename DoFHandler<dim>::cell_iterator> . The periodic boundaries have the boundary indicators 2 (x=0) and 3 (y=0). All the other parameters we have set up before. In this case the direction does not matter. Due to $\text{vertices}_2=R\cdot \text{vertices}_1+b$ this is exactly what we want.

    +

    For setting up the constraints, we first store the periodicity information in an auxiliary object of type std::vector<GridTools::PeriodicFacePair<typename DoFHandler<dim>::cell_iterator> . The periodic boundaries have the boundary indicators 2 (x=0) and 3 (y=0). All the other parameters we have set up before. In this case the direction does not matter. Due to $\text{vertices}_2=R\cdot \text{vertices}_1+b$ this is exactly what we want.

      std::vector<
      GridTools::PeriodicFacePair<typename DoFHandler<dim>::cell_iterator>>
      periodicity_vector;
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html 2024-03-17 21:57:46.863255421 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html 2024-03-17 21:57:46.863255421 +0000 @@ -152,60 +152,60 @@

    Introduction

    This program deals with the problem of coupling different physics in different parts of the domain. Specifically, let us consider the following situation that couples a Stokes fluid with an elastic solid (these two problems were previously discussed separately in step-22 and step-8, where you may want to read up on the individual equations):

    -

    We get a weak formulation of this problem by following our usual rule of multiplying from the left by a test function and integrating over the domain. It then looks like this: Find $y = \{\mathbf v, p,
+<p>We get a weak formulation of this problem by following our usual rule of multiplying from the left by a test function and integrating over the domain. It then looks like this: Find <picture><source srcset=$y = \{\mathbf v, p,
 \mathbf u\} \in Y \subset H^1(\Omega_f)^d \times L_2(\Omega_f) \times
-H^1(\Omega_s)^d$ such that

    -\begin{align*}
+H^1(\Omega_s)^d$ such that

    +\begin{align*}
         2 \eta (\varepsilon(\mathbf a), \varepsilon(\mathbf v))_{\Omega_f}
         - (\nabla \cdot \mathbf a, p)_{\Omega_f}
         - (q, \nabla \cdot \mathbf v)_{\Omega_f} &
@@ -216,15 +216,15 @@
            (2 \eta \varepsilon(\mathbf v) - p \mathbf 1) \mathbf n)_{\Gamma_i}
         &=
         0,
-\end{align*} +\end{align*}" src="form_5307.png"/>

    -

    for all test functions $\mathbf a, q, \mathbf b$; the first, second, and third lines correspond to the fluid, solid, and interface contributions, respectively. Note that $Y$ is only a subspace of the spaces listed above to accommodate for the various Dirichlet boundary conditions.

    +

    for all test functions $\mathbf a, q, \mathbf b$; the first, second, and third lines correspond to the fluid, solid, and interface contributions, respectively. Note that $Y$ is only a subspace of the spaces listed above to accommodate for the various Dirichlet boundary conditions.

    This sort of coupling is of course possible by simply having two Triangulation and two DoFHandler objects, one each for each of the two subdomains. On the other hand, deal.II is much simpler to use if there is a single DoFHandler object that knows about the discretization of the entire problem.

    This program is about how this can be achieved. Note that the goal is not to present a particularly useful physical model (a realistic fluid-structure interaction model would have to take into account the finite deformation of the solid and the effect this has on the fluid): this is, after all, just a tutorial program intended to demonstrate techniques, not to solve actual problems. Furthermore, we will make the assumption that the interface between the subdomains is aligned with coarse mesh cell faces.

    The general idea

    Before going into more details let us state the obvious: this is a problem with multiple solution variables; for this, you will probably want to read the Handling vector valued problems documentation module first, which presents the basic philosophical framework in which we address problems with more than one solution variable. But back to the problem at hand:

    -

    The fundamental idea to implement these sort of problems in deal.II goes as follows: in the problem formulation, the velocity and pressure variables $\mathbf v, p$ only live in the fluid subdomain $\Omega_f$. But let's assume that we extend them by zero to the entire domain $\Omega$ (in the general case this means that they will be discontinuous along $\Gamma_i$). So what is the appropriate function space for these variables? We know that on $\Omega_f$ we should require $\mathbf v \in H^1(\Omega_f)^d, p \in L_2(\Omega_f)$, so for the extensions $\tilde{\mathbf v}, \tilde p$ to the whole domain the following appears a useful set of function spaces:

    -\begin{align*}
+<p>The fundamental idea to implement these sort of problems in deal.II goes as follows: in the problem formulation, the velocity and pressure variables <picture><source srcset=$\mathbf v, p$ only live in the fluid subdomain $\Omega_f$. But let's assume that we extend them by zero to the entire domain $\Omega$ (in the general case this means that they will be discontinuous along $\Gamma_i$). So what is the appropriate function space for these variables? We know that on $\Omega_f$ we should require $\mathbf v \in H^1(\Omega_f)^d, p \in L_2(\Omega_f)$, so for the extensions $\tilde{\mathbf v}, \tilde p$ to the whole domain the following appears a useful set of function spaces:

    +\begin{align*}
   \tilde {\mathbf v} &\in V
    = \{\tilde {\mathbf v}|_{\Omega_f} \in H^1(\Omega_f)^d, \quad
        \tilde {\mathbf v}|_{\Omega_s} = 0 \}
@@ -232,13 +232,13 @@
   \tilde p &\in P
   = \{\tilde p|_{\Omega_f} \in L_2(\Omega_f), \quad
        \tilde p|_{\Omega_s} = 0 \}.
-\end{align*} +\end{align*}" src="form_5312.png"/>

    -

    (Since this is not important for the current discussion, we have omitted the question of boundary values from the choice of function spaces; this question also affects whether we can choose $L_2$ for the pressure or whether we have to choose the space $L_{2,0}(\Omega_f)=\{q\in L_2(\Omega_f): \int_{\Omega_f} q
-= 0\}$ for the pressure. None of these questions are relevant to the following discussion, however.)

    -

    Note that these are indeed a linear function spaces with obvious norm. Since no confusion is possible in practice, we will henceforth omit the tilde again to denote the extension of a function to the whole domain and simply refer by $\mathbf v, p$ to both the original and the extended function.

    -

    For discretization, we need finite dimensional subspaces $V_h,P_h$ of $V, P$. For Stokes, we know from step-22 that an appropriate choice is $Q_{p+1}^d\times Q_P$ but this only holds for that part of the domain occupied by the fluid. For the extended field, let's use the following subspaces defined on the triangulation $\mathbb T$:

    -\begin{align*}
+<p> (Since this is not important for the current discussion, we have omitted the question of boundary values from the choice of function spaces; this question also affects whether we can choose <picture><source srcset=$L_2$ for the pressure or whether we have to choose the space $L_{2,0}(\Omega_f)=\{q\in L_2(\Omega_f): \int_{\Omega_f} q
+= 0\}$ for the pressure. None of these questions are relevant to the following discussion, however.)

    +

    Note that these are indeed a linear function spaces with obvious norm. Since no confusion is possible in practice, we will henceforth omit the tilde again to denote the extension of a function to the whole domain and simply refer by $\mathbf v, p$ to both the original and the extended function.

    +

    For discretization, we need finite dimensional subspaces $V_h,P_h$ of $V, P$. For Stokes, we know from step-22 that an appropriate choice is $Q_{p+1}^d\times Q_P$ but this only holds for that part of the domain occupied by the fluid. For the extended field, let's use the following subspaces defined on the triangulation $\mathbb T$:

    +\begin{align*}
   V_h
    &= \{{\mathbf v}_h \quad | \quad
        \forall K \in {\mathbb T}:
@@ -254,23 +254,23 @@
        p_h|_{\Omega_f}\ \text{is continuous}, \quad
        p_h|_K = 0\ \text{if}\ K\subset {\Omega_s}\ \}
    && \subset P.
-\end{align*} +\end{align*}" src="form_5318.png"/>

    -

    In other words, on $\Omega_f$ we choose the usual discrete spaces but we keep the (discontinuous) extension by zero. The point to make is that we now need a description of a finite element space for functions that are zero on a cell — and this is where the FE_Nothing class comes in: it describes a finite dimensional function space of functions that are constant zero. A particular property of this peculiar linear vector space is that it has no degrees of freedom: it isn't just finite dimensional, it is in fact zero dimensional, and consequently for objects of this type, FiniteElement::n_dofs_per_cell() will return zero. For discussion below, let us give this space a proper symbol:

    -\[
+<p> In other words, on <picture><source srcset=$\Omega_f$ we choose the usual discrete spaces but we keep the (discontinuous) extension by zero. The point to make is that we now need a description of a finite element space for functions that are zero on a cell — and this is where the FE_Nothing class comes in: it describes a finite dimensional function space of functions that are constant zero. A particular property of this peculiar linear vector space is that it has no degrees of freedom: it isn't just finite dimensional, it is in fact zero dimensional, and consequently for objects of this type, FiniteElement::n_dofs_per_cell() will return zero. For discussion below, let us give this space a proper symbol:

    +\[
   Z = \{ \varphi: \varphi(x)=0 \}.
-\] +\]" src="form_5319.png"/>

    -

    The symbol $Z$ reminds of the fact that functions in this space are zero. Obviously, we choose $Z_h=Z$.

    +

    The symbol $Z$ reminds of the fact that functions in this space are zero. Obviously, we choose $Z_h=Z$.

    This entire discussion above can be repeated for the variables we use to describe the elasticity equation. Here, for the extended variables, we have

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \tilde {\mathbf u} &\in U
    = \{\tilde {\mathbf u}|_{\Omega_s} \in H^1(\Omega_f)^d, \quad
        \tilde {\mathbf u}|_{\Omega_f} \in Z(\Omega_s)^d \},
-\end{align*} +\end{align*}" src="form_5321.png"/>

    and we will typically use a finite element space of the kind

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   U_h
    &= \{{\mathbf u}_h \quad | \quad
        \forall K \in {\mathbb T}:
@@ -278,25 +278,25 @@
        {\mathbf u}_h|_{\Omega_f}\ \text{is continuous}, \quad
        {\mathbf u}_h|_K \in Z^d\ \text{if}\ K\subset {\Omega_f}\}
    && \subset U
-\end{align*} +\end{align*}" src="form_5322.png"/>

    -

    of polynomial degree $r$.

    -

    So to sum up, we are going to look for a discrete vector-valued solution $y_h = \{\mathbf v_h, p_h, \mathbf u_h\}$ in the following space:

    -\begin{align*}
+<p> of polynomial degree <picture><source srcset=$r$.

    +

    So to sum up, we are going to look for a discrete vector-valued solution $y_h = \{\mathbf v_h, p_h, \mathbf u_h\}$ in the following space:

    +\begin{align*}
   Y_h = \{
       & y_h = \{\mathbf v_h, p_h, \mathbf u_h\} : \\
       & y_h|_{\Omega_f} \in Q_{p+1}^d \times Q_p \times Z^d, \\
       & y_h|_{\Omega_s} \in Z^d \times Z \times Q_r^d \}.
-\end{align*} +\end{align*}" src="form_5324.png"/>

    Implementation

    -

    So how do we implement this sort of thing? First, we realize that the discrete space $Y_h$ essentially calls for two different finite elements: First, on the fluid subdomain, we need the element $Q_{p+1}^d \times Q_p \times Z^d$ which in deal.II is readily implemented by

    +

    So how do we implement this sort of thing? First, we realize that the discrete space $Y_h$ essentially calls for two different finite elements: First, on the fluid subdomain, we need the element $Q_{p+1}^d \times Q_p \times Z^d$ which in deal.II is readily implemented by

    where FE_Nothing implements the space of functions that are always zero. Second, on the solid subdomain, we need the element $\in Z^d \times Z \times Q_r^d$, which we get using

    where FE_Nothing implements the space of functions that are always zero. Second, on the solid subdomain, we need the element $\in Z^d \times Z \times Q_r^d$, which we get using

    The next step is that we associate each of these two elements with the cells that occupy each of the two subdomains. For this we realize that in a sense the two elements are just variations of each other in that they have the same number of vector components but have different polynomial degrees — this smells very much like what one would do in $hp$ finite element methods, and it is exactly what we are going to do here: we are going to (ab)use the classes and facilities of the hp-namespace to assign different elements to different cells. In other words, we will use collect the two finite elements in an hp::FECollection, will integrate with an appropriate hp::QCollection using an hp::FEValues object, and our DoFHandler will be in hp-mode. You may wish to take a look at step-27 for an overview of all of these concepts.

    @@ -309,11 +309,11 @@

    Specifics of the implementation

    More specifically, in the program we have to address the following points:

    Dealing with the interface terms

    Let us first discuss implementing the bilinear form, which at the discrete level we recall to be

    -\begin{align*}
+<picture><source srcset=\begin{align*}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html	2024-03-17 21:57:46.947255940 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html	2024-03-17 21:57:46.947255940 +0000
@@ -139,70 +139,70 @@
 <p><em>The first author would like to acknowledge the support of NSF Grant No. DMS-1520862. Timo Heister and Wolfgang Bangerth acknowledge support through NSF awards DMS-1821210, EAR-1550901, and OAC-1835673. </em></p>
 <p><a class=

    Introduction

    This program deals with the biharmonic equation,

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \Delta^2 u(\mathbf x) &= f(\mathbf x)
   \qquad \qquad &&\forall \mathbf x \in \Omega.
-\end{align*} +\end{align*}" src="form_5362.png"/>

    This equation appears in the modeling of thin structures such as roofs of stadiums. These objects are of course in reality three-dimensional with a large aspect ratio of lateral extent to perpendicular thickness, but one can often very accurately model these structures as two dimensional by making assumptions about how internal forces vary in the perpendicular direction. These assumptions lead to the equation above.

    The model typically comes in two different kinds, depending on what kinds of boundary conditions are imposed. The first case,

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   u(\mathbf x) &= g(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega, \\
   \Delta u(\mathbf x) &= h(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega,
-\end{align*} +\end{align*}" src="form_5363.png"/>

    -

    corresponds to the edges of the thin structure attached to the top of a wall of height $g(\mathbf x)$ in such a way that the bending forces that act on the structure are $h(\mathbf x)$; in most physical situations, one will have $h=0$, corresponding to the structure simply sitting atop the wall.

    +

    corresponds to the edges of the thin structure attached to the top of a wall of height $g(\mathbf x)$ in such a way that the bending forces that act on the structure are $h(\mathbf x)$; in most physical situations, one will have $h=0$, corresponding to the structure simply sitting atop the wall.

    In the second possible case of boundary values, one would have

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   u(\mathbf x) &= g(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega, \\
   \frac{\partial u(\mathbf x)}{\partial \mathbf n} &= j(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega.
-\end{align*} +\end{align*}" src="form_5366.png"/>

    -

    This corresponds to a "clamped" structure for which a nonzero $j(\mathbf x)$ implies a certain angle against the horizontal.

    +

    This corresponds to a "clamped" structure for which a nonzero $j(\mathbf x)$ implies a certain angle against the horizontal.

    As with Dirichlet and Neumann boundary conditions for the Laplace equation, it is of course possible to have one kind of boundary conditions on one part of the boundary, and the other on the remainder.

    What's the issue?

    The fundamental issue with the equation is that it takes four derivatives of the solution. In the case of the Laplace equation we treated in step-3, step-4, and several other tutorial programs, one multiplies by a test function, integrates, integrates by parts, and ends up with only one derivative on both the test function and trial function – something one can do with functions that are continuous globally, but may have kinks at the interfaces between cells: The derivative may not be defined at the interfaces, but that is on a lower-dimensional manifold (and so doesn't show up in the integrated value).

    -

    But for the biharmonic equation, if one followed the same procedure using integrals over the entire domain (i.e., the union of all cells), one would end up with two derivatives on the test functions and trial functions each. If one were to use the usual piecewise polynomial functions with their kinks on cell interfaces, the first derivative would yield a discontinuous gradient, and the second derivative with delta functions on the interfaces – but because both the second derivatives of the test functions and of the trial functions yield a delta function, we would try to integrate the product of two delta functions. For example, in 1d, where $\varphi_i$ are the usual piecewise linear "hat functions", we would get integrals of the sort

    -\begin{align*}
+<p>But for the biharmonic equation, if one followed the same procedure using integrals over the entire domain (i.e., the union of all cells), one would end up with two derivatives on the test functions and trial functions each. If one were to use the usual piecewise polynomial functions with their kinks on cell interfaces, the first derivative would yield a discontinuous gradient, and the second derivative with delta functions on the interfaces – but because both the second derivatives of the test functions and of the trial functions yield a delta function, we would try to integrate the product of two delta functions. For example, in 1d, where <picture><source srcset=$\varphi_i$ are the usual piecewise linear "hat functions", we would get integrals of the sort

    +\begin{align*}
   \int_0^L (\Delta \varphi_i) (\Delta \varphi_j)
   =
   \int_0^L
   \frac 1h \left[\delta(x-x_{i-1}) - 2\delta(x-x_i) + \delta(x-x_{i+1})\right]
   \frac 1h \left[\delta(x-x_{j-1}) - 2\delta(x-x_j) + \delta(x-x_{j+1})\right]
-\end{align*} +\end{align*}" src="form_5368.png"/>

    -

    where $x_i$ is the node location at which the shape function $\varphi_i$ is defined, and $h$ is the mesh size (assumed uniform). The problem is that delta functions in integrals are defined using the relationship

    -\begin{align*}
+<p> where <picture><source srcset=$x_i$ is the node location at which the shape function $\varphi_i$ is defined, and $h$ is the mesh size (assumed uniform). The problem is that delta functions in integrals are defined using the relationship

    +\begin{align*}
   \int_0^L \delta(x-\hat x) f(x) \; dx
   =
   f(\hat x).
-\end{align*} +\end{align*}" src="form_5369.png"/>

    -

    But that only works if (i) $f(\cdot)$ is actually well defined at $\hat x$, and (ii) if it is finite. On the other hand, an integral of the form

    -\begin{align*}
+<p> But that only works if (i) <picture><source srcset=$f(\cdot)$ is actually well defined at $\hat x$, and (ii) if it is finite. On the other hand, an integral of the form

    +\begin{align*}
 \int_0^L \delta(x-x_i) \delta (x-x_i)
-\end{align*} +\end{align*}" src="form_5372.png"/>

    does not make sense. Similar reasoning can be applied for 2d and 3d situations.

    In other words: This approach of trying to integrate over the entire domain and then integrating by parts can't work.

    Historically, numerical analysts have tried to address this by inventing finite elements that are "C<sup>1</sup> continuous", i.e., that use shape functions that are not just continuous but also have continuous first derivatives. This is the realm of elements such as the Argyris element, the Clough-Tocher element and others, all developed in the late 1960s. From a twenty-first century perspective, they can only be described as bizarre in their construction. They are also exceedingly cumbersome to implement if one wants to use general meshes. As a consequence, they have largely fallen out of favor and deal.II currently does not contain implementations of these shape functions.

    What to do instead?

    So how does one approach solving such problems then? That depends a bit on the boundary conditions. If one has the first set of boundary conditions, i.e., if the equation is

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \Delta^2 u(\mathbf x) &= f(\mathbf x)
   \qquad \qquad &&\forall \mathbf x \in \Omega, \\
   u(\mathbf x) &= g(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega, \\
   \Delta u(\mathbf x) &= h(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega,
-\end{align*} +\end{align*}" src="form_5373.png"/>

    -

    then the following trick works (at least if the domain is convex, see below): In the same way as we obtained the mixed Laplace equation of step-20 from the regular Laplace equation by introducing a second variable, we can here introduce a variable $v=\Delta u$ and can then replace the equations above by the following, "mixed" system:

    -\begin{align*}
+<p> then the following trick works (at least if the domain is convex, see below): In the same way as we obtained the mixed Laplace equation of <a class=step-20 from the regular Laplace equation by introducing a second variable, we can here introduce a variable $v=\Delta u$ and can then replace the equations above by the following, "mixed" system:

    +\begin{align*}
   -\Delta u(\mathbf x) +v(\mathbf x) &= 0
   \qquad \qquad &&\forall \mathbf x \in \Omega, \\
   -\Delta v(\mathbf x) &= -f(\mathbf x)
@@ -211,28 +211,28 @@
   &&\forall \mathbf x \in \partial\Omega, \\
   v(\mathbf x) &= h(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega.
-\end{align*} +\end{align*}" src="form_5375.png"/>

    -

    In other words, we end up with what is in essence a system of two coupled Laplace equations for $u,v$, each with Dirichlet-type boundary conditions. We know how to solve such problems, and it should not be very difficult to construct good solvers and preconditioners for this system either using the techniques of step-20 or step-22. So this case is pretty simple to deal with.

    -
    Note
    It is worth pointing out that this only works for domains whose boundary has corners if the domain is also convex – in other words, if there are no re-entrant corners. This sounds like a rather random condition, but it makes sense in view of the following two facts: The solution of the original biharmonic equation must satisfy $u\in H^2(\Omega)$. On the other hand, the mixed system reformulation above suggests that both $u$ and $v$ satisfy $u,v\in H^1(\Omega)$ because both variables only solve a Poisson equation. In other words, if we want to ensure that the solution $u$ of the mixed problem is also a solution of the original biharmonic equation, then we need to be able to somehow guarantee that the solution of $-\Delta u=v$ is in fact more smooth than just $H^1(\Omega)$. This can be argued as follows: For convex domains, "elliptic - regularity" implies that if the right hand side $v\in H^s$, then $u\in H^{s+2}$ if the domain is convex and the boundary is smooth enough. (This could also be guaranteed if the domain boundary is sufficiently smooth – but domains whose boundaries have no corners are not very practical in real life.) We know that $v\in H^1$ because it solves the equation $-\Delta v=f$, but we are still left with the condition on convexity of the boundary; one can show that polygonal, convex domains are good enough to guarantee that $u\in H^2$ in this case (smoothly bounded, convex domains would result in $u\in H^3$, but we don't need this much regularity). On the other hand, if the domain is not convex, we can not guarantee that the solution of the mixed system is in $H^2$, and consequently may obtain a solution that can't be equal to the solution of the original biharmonic equation.
    +

    In other words, we end up with what is in essence a system of two coupled Laplace equations for $u,v$, each with Dirichlet-type boundary conditions. We know how to solve such problems, and it should not be very difficult to construct good solvers and preconditioners for this system either using the techniques of step-20 or step-22. So this case is pretty simple to deal with.

    +
    Note
    It is worth pointing out that this only works for domains whose boundary has corners if the domain is also convex – in other words, if there are no re-entrant corners. This sounds like a rather random condition, but it makes sense in view of the following two facts: The solution of the original biharmonic equation must satisfy $u\in H^2(\Omega)$. On the other hand, the mixed system reformulation above suggests that both $u$ and $v$ satisfy $u,v\in H^1(\Omega)$ because both variables only solve a Poisson equation. In other words, if we want to ensure that the solution $u$ of the mixed problem is also a solution of the original biharmonic equation, then we need to be able to somehow guarantee that the solution of $-\Delta u=v$ is in fact more smooth than just $H^1(\Omega)$. This can be argued as follows: For convex domains, "elliptic + regularity" implies that if the right hand side $v\in H^s$, then $u\in H^{s+2}$ if the domain is convex and the boundary is smooth enough. (This could also be guaranteed if the domain boundary is sufficiently smooth – but domains whose boundaries have no corners are not very practical in real life.) We know that $v\in H^1$ because it solves the equation $-\Delta v=f$, but we are still left with the condition on convexity of the boundary; one can show that polygonal, convex domains are good enough to guarantee that $u\in H^2$ in this case (smoothly bounded, convex domains would result in $u\in H^3$, but we don't need this much regularity). On the other hand, if the domain is not convex, we can not guarantee that the solution of the mixed system is in $H^2$, and consequently may obtain a solution that can't be equal to the solution of the original biharmonic equation.

    The more complicated situation is if we have the "clamped" boundary conditions, i.e., if the equation looks like this:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \Delta^2 u(\mathbf x) &= f(\mathbf x)
   \qquad \qquad &&\forall \mathbf x \in \Omega, \\
   u(\mathbf x) &= g(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega, \\
   \frac{\partial u(\mathbf x)}{\partial \mathbf n} &= j(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega.
-\end{align*} +\end{align*}" src="form_5385.png"/>

    -

    The same trick with the mixed system does not work here, because we would end up with both Dirichlet and Neumann boundary conditions for $u$, but none for $v$.

    -

    The solution to this conundrum arrived with the Discontinuous Galerkin method wave in the 1990s and early 2000s: In much the same way as one can use discontinuous shape functions for the Laplace equation by penalizing the size of the discontinuity to obtain a scheme for an equation that has one derivative on each shape function, we can use a scheme that uses continuous (but not $C^1$ continuous) shape functions and penalize the jump in the derivative to obtain a scheme for an equation that has two derivatives on each shape function. In analogy to the Interior Penalty (IP) method for the Laplace equation, this scheme for the biharmonic equation is typically called the $C^0$ IP (or C0IP) method, since it uses $C^0$ (continuous but not continuously differentiable) shape functions with an interior penalty formulation.

    +

    The same trick with the mixed system does not work here, because we would end up with both Dirichlet and Neumann boundary conditions for $u$, but none for $v$.

    +

    The solution to this conundrum arrived with the Discontinuous Galerkin method wave in the 1990s and early 2000s: In much the same way as one can use discontinuous shape functions for the Laplace equation by penalizing the size of the discontinuity to obtain a scheme for an equation that has one derivative on each shape function, we can use a scheme that uses continuous (but not $C^1$ continuous) shape functions and penalize the jump in the derivative to obtain a scheme for an equation that has two derivatives on each shape function. In analogy to the Interior Penalty (IP) method for the Laplace equation, this scheme for the biharmonic equation is typically called the $C^0$ IP (or C0IP) method, since it uses $C^0$ (continuous but not continuously differentiable) shape functions with an interior penalty formulation.

    It is worth noting that the C0IP method is not the only one that has been developed for the biharmonic equation. step-82 shows an alternative method.

    Derivation of the C0IP method

    -

    We base this program on the $C^0$ IP method presented by Susanne Brenner and Li-Yeng Sung in the paper "C \_form#href_anchor" [Brenner2005] where the method is derived for the biharmonic equation with "clamped" boundary conditions.

    -

    As mentioned, this method relies on the use of $C^0$ Lagrange finite elements where the $C^1$ continuity requirement is relaxed and has been replaced with interior penalty techniques. To derive this method, we consider a $C^0$ shape function $v_h$ which vanishes on $\partial\Omega$. We introduce notation $ \mathbb{F} $ as the set of all faces of $\mathbb{T}$, $ \mathbb{F}^b $ as the set of boundary faces, and $ \mathbb{F}^i $ as the set of interior faces for use further down below. Since the higher order derivatives of $v_h$ have two values on each interface $e\in \mathbb{F}$ (shared by the two cells $K_{+},K_{-} \in \mathbb{T}$), we cope with this discontinuity by defining the following single-valued functions on $e$:

    -\begin{align*}
+<p>We base this program on the <picture><source srcset=$C^0$ IP method presented by Susanne Brenner and Li-Yeng Sung in the paper "C \_form#href_anchor" [Brenner2005] where the method is derived for the biharmonic equation with "clamped" boundary conditions.

    +

    As mentioned, this method relies on the use of $C^0$ Lagrange finite elements where the $C^1$ continuity requirement is relaxed and has been replaced with interior penalty techniques. To derive this method, we consider a $C^0$ shape function $v_h$ which vanishes on $\partial\Omega$. We introduce notation $ \mathbb{F} $ as the set of all faces of $\mathbb{T}$, $ \mathbb{F}^b $ as the set of boundary faces, and $ \mathbb{F}^i $ as the set of interior faces for use further down below. Since the higher order derivatives of $v_h$ have two values on each interface $e\in \mathbb{F}$ (shared by the two cells $K_{+},K_{-} \in \mathbb{T}$), we cope with this discontinuity by defining the following single-valued functions on $e$:

    +\begin{align*}
   \jump{\frac{\partial^k v_h}{\partial \mathbf n^k}}
   &=
   \frac{\partial^k v_h|_{K_+}}{\partial \mathbf n^k} \bigg |_e
@@ -243,29 +243,29 @@
   \frac{1}{2}
   \bigg( \frac{\partial^k v_h|_{K_+}}{\partial \mathbf n^k} \bigg |_e
   + \frac{\partial^k v_h|_{K_-}}{\partial \mathbf n^k} \bigg |_e \bigg )
-\end{align*} +\end{align*}" src="form_5393.png"/>

    -

    for $k =1,2$ (i.e., for the gradient and the matrix of second derivatives), and where $\mathbf n$ denotes a unit vector normal to $e$ pointing from $K_+$ to $K_-$. In the literature, these functions are referred to as the "jump" and "average" operations, respectively.

    -

    To obtain the $C^0$ IP approximation $u_h$, we left multiply the biharmonic equation by $v_h$, and then integrate over $\Omega$. As explained above, we can't do the integration by parts on all of $\Omega$ with these shape functions, but we can do it on each cell individually since the shape functions are just polynomials on each cell. Consequently, we start by using the following integration-by-parts formula on each mesh cell $K \in {\mathbb{T}}$:

    -\begin{align*}
+<p> for <picture><source srcset=$k =1,2$ (i.e., for the gradient and the matrix of second derivatives), and where $\mathbf n$ denotes a unit vector normal to $e$ pointing from $K_+$ to $K_-$. In the literature, these functions are referred to as the "jump" and "average" operations, respectively.

    +

    To obtain the $C^0$ IP approximation $u_h$, we left multiply the biharmonic equation by $v_h$, and then integrate over $\Omega$. As explained above, we can't do the integration by parts on all of $\Omega$ with these shape functions, but we can do it on each cell individually since the shape functions are just polynomials on each cell. Consequently, we start by using the following integration-by-parts formula on each mesh cell $K \in {\mathbb{T}}$:

    +\begin{align*}
   \int_K v_h (\Delta^2 w_h)
   &= \int_K v_h (\nabla\cdot\nabla) (\Delta w_h)
   \\
   &= -\int_K \nabla v_h \cdot (\nabla \Delta w_h)
      +\int_{\partial K} v_h (\nabla \Delta w_h \cdot \mathbf n).
-\end{align*} +\end{align*}" src="form_5398.png"/>

    -

    At this point, we have two options: We can integrate the domain term's $\nabla\Delta w_h$ one more time to obtain

    -\begin{align*}
+<p> At this point, we have two options: We can integrate the domain term's <picture><source srcset=$\nabla\Delta w_h$ one more time to obtain

    +\begin{align*}
   \int_K v_h (\Delta^2 w_h)
   &= \int_K (\Delta v_h) (\Delta w_h)
      +\int_{\partial K} v_h (\nabla \Delta w_h \cdot \mathbf n)
      -\int_{\partial K} (\nabla v_h \cdot \mathbf n) \Delta w_h.
-\end{align*} +\end{align*}" src="form_5400.png"/>

    For a variety of reasons, this turns out to be a variation that is not useful for our purposes.

    -

    Instead, what we do is recognize that $\nabla\Delta w_h = \text{grad}\,(\text{div}\,\text{grad}\, w_h)$, and we can re-sort these operations as $\nabla\Delta w_h = \text{div}\,(\text{grad}\,\text{grad}\, w_h)$ where we typically write $\text{grad}\,\text{grad}\, w_h = D^2 w_h$ to indicate that this is the "Hessian" matrix of second derivatives. With this re-ordering, we can now integrate the divergence, rather than the gradient operator, and we get the following instead:

    -\begin{align*}
+<p>Instead, what we do is recognize that <picture><source srcset=$\nabla\Delta w_h = \text{grad}\,(\text{div}\,\text{grad}\, w_h)$, and we can re-sort these operations as $\nabla\Delta w_h = \text{div}\,(\text{grad}\,\text{grad}\, w_h)$ where we typically write $\text{grad}\,\text{grad}\, w_h = D^2 w_h$ to indicate that this is the "Hessian" matrix of second derivatives. With this re-ordering, we can now integrate the divergence, rather than the gradient operator, and we get the following instead:

    +\begin{align*}
   \int_K v_h (\Delta^2 w_h)
   &= \int_K (\nabla \nabla v_h) : (\nabla \nabla w_h)
      +\int_{\partial K} v_h (\nabla \Delta w_h \cdot \mathbf n)
@@ -274,11 +274,11 @@
   &= \int_K (D^2 v_h) : (D^2 w_h)
      +\int_{\partial K} v_h (\nabla \Delta w_h \cdot \mathbf n)
      -\int_{\partial K} (\nabla v_h) \cdot (D^2 w_h \mathbf n).
-\end{align*} +\end{align*}" src="form_5404.png"/>

    -

    Here, the colon indicates a double-contraction over the indices of the matrices to its left and right, i.e., the scalar product between two tensors. The outer product of two vectors $a \otimes b$ yields the matrix $(a \otimes b)_{ij} = a_i b_j$.

    -

    Then, we sum over all cells $K \in  \mathbb{T}$, and take into account that this means that every interior face appears twice in the sum. If we therefore split everything into a sum of integrals over cell interiors and a separate sum over cell interfaces, we can use the jump and average operators defined above. There are two steps left: First, because our shape functions are continuous, the gradients of the shape functions may be discontinuous, but the continuity guarantees that really only the normal component of the gradient is discontinuous across faces whereas the tangential component(s) are continuous. Second, the discrete formulation that results is not stable as the mesh size goes to zero, and to obtain a stable formulation that converges to the correct solution, we need to add the following terms:

    -\begin{align*}
+<p> Here, the colon indicates a double-contraction over the indices of the matrices to its left and right, i.e., the scalar product between two tensors. The outer product of two vectors <picture><source srcset=$a \otimes b$ yields the matrix $(a \otimes b)_{ij} = a_i b_j$.

    +

    Then, we sum over all cells $K \in  \mathbb{T}$, and take into account that this means that every interior face appears twice in the sum. If we therefore split everything into a sum of integrals over cell interiors and a separate sum over cell interfaces, we can use the jump and average operators defined above. There are two steps left: First, because our shape functions are continuous, the gradients of the shape functions may be discontinuous, but the continuity guarantees that really only the normal component of the gradient is discontinuous across faces whereas the tangential component(s) are continuous. Second, the discrete formulation that results is not stable as the mesh size goes to zero, and to obtain a stable formulation that converges to the correct solution, we need to add the following terms:

    +\begin{align*}
 -\sum_{e \in \mathbb{F}} \int_{e}
   \average{\frac{\partial^2 v_h}{\partial \mathbf n^2}}
   \jump{\frac{\partial u_h}{\partial \mathbf n}}
@@ -286,16 +286,16 @@
   \frac{\gamma}{h_e}\int_e
   \jump{\frac{\partial v_h}{\partial \mathbf n}}
   \jump{\frac{\partial u_h}{\partial \mathbf n}}.
-\end{align*} +\end{align*}" src="form_5408.png"/>

    -

    Then, after making cancellations that arise, we arrive at the following C0IP formulation of the biharmonic equation: find $u_h$ such that $u_h =
-g$ on $\partial \Omega$ and

    -\begin{align*}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html	2024-03-17 21:57:47.011256335 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html	2024-03-17 21:57:47.019256385 +0000
@@ -134,24 +134,24 @@
 <p><em> This program was contributed by Katharina Kormann and Martin Kronbichler.</em></p>
 <p><em>The algorithm for the matrix-vector product is based on the article <a href=A generic interface for parallel cell-based finite element operator application by Martin Kronbichler and Katharina Kormann, Computers and Fluids 63:135–147, 2012, and the paper "Parallel finite element operator application: Graph partitioning and coloring" by Katharina Kormann and Martin Kronbichler in: Proceedings of the 7th IEEE International Conference on e-Science, 2011.

    Introduction

    -

    This program demonstrates how to use the cell-based implementation of finite element operators with the MatrixFree class, first introduced in step-37, to solve nonlinear partial differential equations. Moreover, we have another look at the handling of constraints within the matrix-free framework. Finally, we will use an explicit time-stepping method to solve the problem and introduce Gauss-Lobatto finite elements that are very convenient in this case since their mass matrix can be accurately approximated by a diagonal, and thus trivially invertible, matrix. The two ingredients to this property are firstly a distribution of the nodal points of Lagrange polynomials according to the point distribution of the Gauss-Lobatto quadrature rule. Secondly, the quadrature is done with the same Gauss-Lobatto quadrature rule. In this formula, the integrals $\int_K \varphi_i \varphi_j
-dx\approx \sum_q \varphi_i \varphi_j \mathrm{det}(J) \big |_{x_q}$ become zero whenever $i\neq j$, because exactly one function $\varphi_j$ is one and all others zero in the points defining the Lagrange polynomials. Moreover, the Gauss-Lobatto distribution of nodes of Lagrange polynomials clusters the nodes towards the element boundaries. This results in a well-conditioned polynomial basis for high-order discretization methods. Indeed, the condition number of an FE_Q elements with equidistant nodes grows exponentially with the degree, which destroys any benefit for orders of about five and higher. For this reason, Gauss-Lobatto points are the default distribution for the FE_Q element (but at degrees one and two, those are equivalent to the equidistant points).

    +

    This program demonstrates how to use the cell-based implementation of finite element operators with the MatrixFree class, first introduced in step-37, to solve nonlinear partial differential equations. Moreover, we have another look at the handling of constraints within the matrix-free framework. Finally, we will use an explicit time-stepping method to solve the problem and introduce Gauss-Lobatto finite elements that are very convenient in this case since their mass matrix can be accurately approximated by a diagonal, and thus trivially invertible, matrix. The two ingredients to this property are firstly a distribution of the nodal points of Lagrange polynomials according to the point distribution of the Gauss-Lobatto quadrature rule. Secondly, the quadrature is done with the same Gauss-Lobatto quadrature rule. In this formula, the integrals $\int_K \varphi_i \varphi_j
+dx\approx \sum_q \varphi_i \varphi_j \mathrm{det}(J) \big |_{x_q}$ become zero whenever $i\neq j$, because exactly one function $\varphi_j$ is one and all others zero in the points defining the Lagrange polynomials. Moreover, the Gauss-Lobatto distribution of nodes of Lagrange polynomials clusters the nodes towards the element boundaries. This results in a well-conditioned polynomial basis for high-order discretization methods. Indeed, the condition number of an FE_Q elements with equidistant nodes grows exponentially with the degree, which destroys any benefit for orders of about five and higher. For this reason, Gauss-Lobatto points are the default distribution for the FE_Q element (but at degrees one and two, those are equivalent to the equidistant points).

    Problem statement and discretization

    As an example, we choose to solve the sine-Gordon soliton equation

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 u_{tt} &=& \Delta u -\sin(u) \quad\mbox{for}\quad (x,t) \in
 \Omega \times (t_0,t_f],\\
 {\mathbf n} \cdot \nabla u &=& 0
 \quad\mbox{for}\quad (x,t) \in \partial\Omega \times (t_0,t_f],\\
 u(x,t_0) &=& u_0(x).
-\end{eqnarray*} +\end{eqnarray*}" src="form_5483.png"/>

    that was already introduced in step-25. As a simple explicit time integration method, we choose leap frog scheme using the second-order formulation of the equation. With this time stepping, the scheme reads in weak form

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 (v,u^{n+1}) = (v,2 u^n-u^{n-1} -
 (\Delta t)^2 \sin(u^n)) - (\nabla v, (\Delta t)^2 \nabla u^n),
-\end{eqnarray*} +\end{eqnarray*}" src="form_5484.png"/>

    where v denotes a test function and the index n stands for the time step number.

    For the spatial discretization, we choose FE_Q elements with basis functions defined to interpolate the support points of the Gauss-Lobatto quadrature rule. Moreover, when we compute the integrals over the basis functions to form the mass matrix and the operator on the right hand side of the equation above, we use the Gauss-Lobatto quadrature rule with the same support points as the node points of the finite element to evaluate the integrals. Since the finite element is Lagrangian, this will yield a diagonal mass matrix on the left hand side of the equation, making the solution of the linear system in each time step trivial.

    @@ -159,19 +159,19 @@

    Apart from the fact that we avoid solving linear systems with this type of elements when using explicit time-stepping, they come with two other advantages. When we are using the sum-factorization approach to evaluate the finite element operator (cf. step-37), we have to evaluate the function at the quadrature points. In the case of Gauss-Lobatto elements, where quadrature points and node points of the finite element coincide, this operation is trivial since the value of the function at the quadrature points is given by its one-dimensional coefficients. In this way, the arithmetic work for the finite element operator evaluation is reduced by approximately a factor of two compared to the generic Gaussian quadrature.

    To sum up the discussion, by using the right finite element and quadrature rule combination, we end up with a scheme where we only need to compute the right hand side vector corresponding to the formulation above and then multiply it by the inverse of the diagonal mass matrix in each time step. In practice, of course, we extract the diagonal elements and invert them only once at the beginning of the program.

    Implementation of constraints

    -

    The usual way to handle constraints in deal.II is to use the AffineConstraints class that builds a sparse matrix storing information about which degrees of freedom (DoF) are constrained and how they are constrained. This format uses an unnecessarily large amount of memory since there are not so many different types of constraints: for example, in the case of hanging nodes when using linear finite element on every cell, most constraints have the form $x_k = \frac 12 x_i + \frac 12 x_j$ where the coefficients $\frac 12$ are always the same and only $i,j,k$ are different. While storing this redundant information is not a problem in general because it is only needed once during matrix and right hand side assembly, it becomes a bottleneck in the matrix-free approach since there this information has to be accessed every time we apply the operator, and the remaining components of the operator evaluation are so fast. Thus, instead of an AffineConstraints object, MatrixFree uses a variable that we call constraint_pool that collects the weights of the different constraints. Then, only an identifier of each constraint in the mesh instead of all the weights have to be stored. Moreover, the constraints are not applied in a pre- and postprocessing step but rather as we evaluate the finite element operator. Therefore, the constraint information is embedded into the variable indices_local_to_global that is used to extract the cell information from the global vector. If a DoF is constrained, the indices_local_to_global variable contains the global indices of the DoFs that it is constrained to. Then, we have another variable constraint_indicator at hand that holds, for each cell, the local indices of DoFs that are constrained as well as the identifier of the type of constraint. Fortunately, you will not see these data structures in the example program since the class FEEvaluation takes care of the constraints without user interaction.

    +

    The usual way to handle constraints in deal.II is to use the AffineConstraints class that builds a sparse matrix storing information about which degrees of freedom (DoF) are constrained and how they are constrained. This format uses an unnecessarily large amount of memory since there are not so many different types of constraints: for example, in the case of hanging nodes when using linear finite element on every cell, most constraints have the form $x_k = \frac 12 x_i + \frac 12 x_j$ where the coefficients $\frac 12$ are always the same and only $i,j,k$ are different. While storing this redundant information is not a problem in general because it is only needed once during matrix and right hand side assembly, it becomes a bottleneck in the matrix-free approach since there this information has to be accessed every time we apply the operator, and the remaining components of the operator evaluation are so fast. Thus, instead of an AffineConstraints object, MatrixFree uses a variable that we call constraint_pool that collects the weights of the different constraints. Then, only an identifier of each constraint in the mesh instead of all the weights have to be stored. Moreover, the constraints are not applied in a pre- and postprocessing step but rather as we evaluate the finite element operator. Therefore, the constraint information is embedded into the variable indices_local_to_global that is used to extract the cell information from the global vector. If a DoF is constrained, the indices_local_to_global variable contains the global indices of the DoFs that it is constrained to. Then, we have another variable constraint_indicator at hand that holds, for each cell, the local indices of DoFs that are constrained as well as the identifier of the type of constraint. Fortunately, you will not see these data structures in the example program since the class FEEvaluation takes care of the constraints without user interaction.

    In the presence of hanging nodes, the diagonal mass matrix obtained on the element level via the Gauss-Lobatto quadrature/node point procedure does not directly translate to a diagonal global mass matrix, as following the constraints on rows and columns would also add off-diagonal entries. As explained in Kormann (2016), interpolating constraints on a vector, which maintains the diagonal shape of the mass matrix, is consistent with the equations up to an error of the same magnitude as the quadrature error. In the program below, we will simply assemble the diagonal of the mass matrix as if it were a vector to enable this approximation.

    Parallelization

    The MatrixFree class comes with the option to be parallelized on three levels: MPI parallelization on clusters of distributed nodes, thread parallelization scheduled by the Threading Building Blocks library, and finally with a vectorization by working on a batch of two (or more) cells via SIMD data type (sometimes called cross-element or external vectorization). As we have already discussed in step-37, you will get best performance by using an instruction set specific to your system, e.g. with the cmake variable -DCMAKE_CXX_FLAGS="-march=native". The MPI parallelization was already exploited in step-37. Here, we additionally consider thread parallelization with TBB. This is fairly simple, as all we need to do is to tell the initialization of the MatrixFree object about the fact that we want to use a thread parallel scheme through the variable MatrixFree::AdditionalData::thread_parallel_scheme. During setup, a dependency graph is set up similar to the one described in the workstream_paper , which allows to schedule the work of the local_apply function on chunks of cells without several threads accessing the same vector indices. As opposed to the WorkStream loops, some additional clever tricks to avoid global synchronizations as described in Kormann and Kronbichler (2011) are also applied.

    Note that this program is designed to be run with a distributed triangulation (parallel::distributed::Triangulation), which requires deal.II to be configured with p4est as described in the deal.II ReadMe file. However, a non-distributed triangulation is also supported, in which case the computation will be run in serial.

    The test case

    In our example, we choose the initial value to be

    -\begin{eqnarray*} u(x,t) =
+<picture><source srcset=\begin{eqnarray*} u(x,t) =
 \prod_{i=1}^{d} -4 \arctan \left(
 \frac{m}{\sqrt{1-m^2}}\frac{\sin\left(\sqrt{1-m^2} t +c_2\right)}{\cosh(mx_i+c_1)}\right)
-\end{eqnarray*} +\end{eqnarray*}" src="form_5487.png"/>

    -

    and solve the equation over the time interval [-10,10]. The constants are chosen to be $c_1=c_1=0$ and m=0.5. As mentioned in step-25, in one dimension u as a function of t is the exact solution of the sine-Gordon equation. For higher dimension, this is however not the case.

    +

    and solve the equation over the time interval [-10,10]. The constants are chosen to be $c_1=c_1=0$ and m=0.5. As mentioned in step-25, in one dimension u as a function of t is the exact solution of the sine-Gordon equation. For higher dimension, this is however not the case.

    The commented program

    The necessary files from the deal.II library.

      #href_anchor"line">  #include <deal.II/base/multithread_info.h>
    @@ -443,7 +443,7 @@
    STL namespace.

    SineGordonProblem::make_grid_and_dofs

    -

    As in step-25 this functions sets up a cube grid in dim dimensions of extent $[-15,15]$. We refine the mesh more in the center of the domain since the solution is concentrated there. We first refine all cells whose center is within a radius of 11, and then refine once more for a radius 6. This simple ad hoc refinement could be done better by adapting the mesh to the solution using error estimators during the time stepping as done in other example programs, and using parallel::distributed::SolutionTransfer to transfer the solution to the new mesh.

    +

    As in step-25 this functions sets up a cube grid in dim dimensions of extent $[-15,15]$. We refine the mesh more in the center of the domain since the solution is concentrated there. We first refine all cells whose center is within a radius of 11, and then refine once more for a radius 6. This simple ad hoc refinement could be done better by adapting the mesh to the solution using error estimators during the time stepping as done in other example programs, and using parallel::distributed::SolutionTransfer to transfer the solution to the new mesh.

      template <int dim>
      void SineGordonProblem<dim>::make_grid_and_dofs()
      {
    @@ -711,13 +711,13 @@   MF SpMV dealii MF dealii -2D, $\mathcal{Q}_2$ 0.0106 0.00971 0.109 0.0243 0.124 +2D, $\mathcal{Q}_2$ 0.0106 0.00971 0.109 0.0243 0.124 -2D, $\mathcal{Q}_4$ 0.0328 0.0706 0.528 0.0714 0.502 +2D, $\mathcal{Q}_4$ 0.0328 0.0706 0.528 0.0714 0.502 -3D, $\mathcal{Q}_2$ 0.0151 0.0320 0.331 0.0376 0.364 +3D, $\mathcal{Q}_2$ 0.0151 0.0320 0.331 0.0376 0.364 -3D, $\mathcal{Q}_4$ 0.0918 0.844 6.83 0.194 6.95 +3D, $\mathcal{Q}_4$ 0.0918 0.844 6.83 0.194 6.95

    It is apparent that the matrix-free code outperforms the standard assembly routines in deal.II by far. In 3D and for fourth order elements, one operator evaluation is also almost ten times as fast as a sparse matrix-vector product.

    Parallel run in 2D and 3D

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html 2024-03-17 21:57:47.067256681 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html 2024-03-17 21:57:47.071256706 +0000 @@ -189,7 +189,7 @@
    ...
    Definition point.h:112
    SymmetricTensor< 2, dim, Number > b(const Tensor< 2, dim, Number > &F)
    -

    It is relevant to note that all points in Gmsh are three-dimensional objects. Since we here want to generate a two-dimensional mesh, the points simply have a zero $z$ coordinate. The fourth number in the curly braces for each point (equal to 1.0 for all of the points above) indicates the desired mesh size in the vicinity of this point. Gmsh's graphical user interfaces writes this into the .geo file automatically, but it can be omitted and one would probably do that if one were to write this file by hand.

    +

    It is relevant to note that all points in Gmsh are three-dimensional objects. Since we here want to generate a two-dimensional mesh, the points simply have a zero $z$ coordinate. The fourth number in the curly braces for each point (equal to 1.0 for all of the points above) indicates the desired mesh size in the vicinity of this point. Gmsh's graphical user interfaces writes this into the .geo file automatically, but it can be omitted and one would probably do that if one were to write this file by hand.

    The file contains many more points than just these six. If you look into the file, you will also realize that one does not have to enumerate points consecutively: One can number them in whichever way one wants, which is often useful when constructing complex geometries. In such cases, one might for example want to number all points for one particular part of the geometry starting at zero, and the points for another part at, say, 1000. It does not matter whether all numbers between zero and 1000 are used.

    Lines on outer domain

    To create lines of the mesh, go to Geometry -> Elementary entities -> Add -> Line. You do not get a prompt to enter in specific coordinates, rather you simply click a starting point and ending point for each line.

    @@ -441,7 +441,7 @@
    void merge_triangulations(const Triangulation< dim, spacedim > &triangulation_1, const Triangulation< dim, spacedim > &triangulation_2, Triangulation< dim, spacedim > &result, const double duplicated_vertex_tolerance=1.0e-12, const bool copy_manifold_ids=false, const bool copy_boundary_ids=false)

    grid_3: Moving vertices

    In this function, we move vertices of a mesh. This is simpler than one usually expects: if you ask a cell using cell->vertex(i) for the coordinates of its ith vertex, it doesn't just provide the location of this vertex but in fact a reference to the location where these coordinates are stored. We can then modify the value stored there.

    -

    So this is what we do in the first part of this function: We create a square of geometry $[-1,1]^2$ with a circular hole with radius 0.25 located at the origin. We then loop over all cells and all vertices and if a vertex has a $y$ coordinate equal to one, we move it upward by 0.5.

    +

    So this is what we do in the first part of this function: We create a square of geometry $[-1,1]^2$ with a circular hole with radius 0.25 located at the origin. We then loop over all cells and all vertices and if a vertex has a $y$ coordinate equal to one, we move it upward by 0.5.

    Note that this sort of procedure does not usually work this way because one will typically encounter the same vertices multiple times and may move them more than once. It works here because we select the vertices we want to use based on their geometric location, and a vertex moved once will fail this test in the future. A more general approach to this problem would have been to keep a std::set of those vertex indices that we have already moved (which we can obtain using cell->vertex_index(i) and only move those vertices whose index isn't in the set yet.

      void grid_3()
      {
    @@ -481,8 +481,8 @@
     
    void extrude_triangulation(const Triangulation< 2, 2 > &input, const unsigned int n_slices, const double height, Triangulation< 3, 3 > &result, const bool copy_manifold_ids=false, const std::vector< types::manifold_id > &manifold_priorities={})

    grid_5: Demonstrating GridTools::transform, part 1

    -

    This and the next example first create a mesh and then transform it by moving every node of the mesh according to a function that takes a point and returns a mapped point. In this case, we transform $(x,y) \mapsto
-   (x,y+\sin(\pi x/5))$.

    +

    This and the next example first create a mesh and then transform it by moving every node of the mesh according to a function that takes a point and returns a mapped point. In this case, we transform $(x,y) \mapsto
+   (x,y+\sin(\pi x/5))$.

    GridTools::transform() takes a triangulation and an argument that can be called like a function taking a Point and returning a Point. There are different ways of providing such an argument: It could be a pointer to a function; it could be an object of a class that has an operator(); it could be a lambda function; or it could be anything that is described via a std::function<Point<2>(const Point<2>)> object.

    Decidedly the more modern way is to use a lambda function that takes a Point and returns a Point, and that is what we do in the following:

      void grid_5()
    @@ -510,7 +510,7 @@
    static constexpr double PI
    Definition numbers.h:259
    ::VectorizedArray< Number, width > sin(const ::VectorizedArray< Number, width > &)

    grid_6: Demonstrating GridTools::transform, part 2

    -

    In this second example of transforming points from an original to a new mesh, we will use the mapping $(x,y) \mapsto (x,\tanh(2y)/\tanh(2))$. To make things more interesting, rather than doing so in a single function as in the previous example, we here create an object with an operator() that will be called by GridTools::transform. Of course, this object may in reality be much more complex: the object may have member variables that play a role in computing the new locations of vertices.

    +

    In this second example of transforming points from an original to a new mesh, we will use the mapping $(x,y) \mapsto (x,\tanh(2y)/\tanh(2))$. To make things more interesting, rather than doing so in a single function as in the previous example, we here create an object with an operator() that will be called by GridTools::transform. Of course, this object may in reality be much more complex: the object may have member variables that play a role in computing the new locations of vertices.

      struct Grid6Func
      {
      double trans(const double y) const
    @@ -783,9 +783,9 @@

    This creates the following mesh:

    -

    This mesh has the right general shape, but the top cells are now polygonal: their edges are no longer along circles and we do not have a very accurate representation of the original geometry. The next step is to teach the top part of the domain that it should be curved. Put another way, all calculations done on the top boundary cells should be done in cylindrical coordinates rather than Cartesian coordinates. We can do this by creating a CylindricalManifold object and associating it with the cells above $y = 3$. This way, when we refine the cells on top, we will place new points along concentric circles instead of straight lines.

    -

    In deal.II we describe all geometries with classes that inherit from Manifold. The default geometry is Cartesian and is implemented in the FlatManifold class. As the name suggests, Manifold and its inheriting classes provide a way to describe curves and curved cells in a general way with ideas and terminology from differential geometry: for example, CylindricalManifold inherits from ChartManifold, which describes a geometry through pull backs and push forwards. In general, one should think that the Triangulation class describes the topology of a domain (in addition, of course, to storing the locations of the vertices) while the Manifold classes describe the geometry of a domain (e.g., whether or not a pair of vertices lie along a circular arc or a straight line). A Triangulation will refine cells by doing computations with the Manifold associated with that cell regardless of whether or not the cell is on the boundary. Put another way: the Manifold classes do not need any information about where the boundary of the Triangulation actually is: it is up to the Triangulation to query the right Manifold for calculations on a cell. Most Manifold functions (e.g., Manifold::get_intermediate_point) know nothing about the domain itself and just assume that the points given to it lie along a geodesic. In this case, with the CylindricalManifold constructed below, the geodesics are arcs along circles orthogonal to the $z$-axis centered along the line $(0, 3, z)$.

    -

    Since all three top parts of the domain use the same geodesics, we will mark all cells with centers above the $y = 3$ line as being cylindrical in nature:

    +

    This mesh has the right general shape, but the top cells are now polygonal: their edges are no longer along circles and we do not have a very accurate representation of the original geometry. The next step is to teach the top part of the domain that it should be curved. Put another way, all calculations done on the top boundary cells should be done in cylindrical coordinates rather than Cartesian coordinates. We can do this by creating a CylindricalManifold object and associating it with the cells above $y = 3$. This way, when we refine the cells on top, we will place new points along concentric circles instead of straight lines.

    +

    In deal.II we describe all geometries with classes that inherit from Manifold. The default geometry is Cartesian and is implemented in the FlatManifold class. As the name suggests, Manifold and its inheriting classes provide a way to describe curves and curved cells in a general way with ideas and terminology from differential geometry: for example, CylindricalManifold inherits from ChartManifold, which describes a geometry through pull backs and push forwards. In general, one should think that the Triangulation class describes the topology of a domain (in addition, of course, to storing the locations of the vertices) while the Manifold classes describe the geometry of a domain (e.g., whether or not a pair of vertices lie along a circular arc or a straight line). A Triangulation will refine cells by doing computations with the Manifold associated with that cell regardless of whether or not the cell is on the boundary. Put another way: the Manifold classes do not need any information about where the boundary of the Triangulation actually is: it is up to the Triangulation to query the right Manifold for calculations on a cell. Most Manifold functions (e.g., Manifold::get_intermediate_point) know nothing about the domain itself and just assume that the points given to it lie along a geodesic. In this case, with the CylindricalManifold constructed below, the geodesics are arcs along circles orthogonal to the $z$-axis centered along the line $(0, 3, z)$.

    +

    Since all three top parts of the domain use the same geodesics, we will mark all cells with centers above the $y = 3$ line as being cylindrical in nature:

    const Tensor<1, 3> axis({0.0, 0.0, 1.0});
    const Point<3> axial_point(0, 3.0, 0.0);
    const CylindricalManifold<3> cylinder(axis, axial_point);
    @@ -806,7 +806,7 @@
    Point< 3 > center

    With this code, we get a mesh that looks like this:

    -

    This change fixes the boundary but creates a new problem: the cells adjacent to the cylinder's axis are badly distorted. We should use Cartesian coordinates for calculations on these central cells to avoid this issue. The cells along the center line all have a face that touches the line $(0, 3, z)$ so, to implement this, we go back and overwrite the manifold_ids on these cells to be zero (which is the default):

    +

    This change fixes the boundary but creates a new problem: the cells adjacent to the cylinder's axis are badly distorted. We should use Cartesian coordinates for calculations on these central cells to avoid this issue. The cells along the center line all have a face that touches the line $(0, 3, z)$ so, to implement this, we go back and overwrite the manifold_ids on these cells to be zero (which is the default):

    const Tensor<1, 3> axis({0.0, 0.0, 1.0});
    const Point<3> axial_point(0, 3.0, 0.0);
    const CylindricalManifold<3> cylinder(axis, axial_point);
    @@ -838,7 +838,7 @@

    Possibilities for extensions

    Assigning different boundary ids

    It is often useful to assign different boundary ids to a mesh that is generated in one form or another as described in this tutorial to apply different boundary conditions.

    -

    For example, you might want to apply a different boundary condition for the right boundary of the first grid in this program. To do this, iterate over the cells and their faces and identify the correct faces (for example using cell->center() to query the coordinates of the center of a cell as we do in step-1, or using cell->face(f)->get_boundary_id() to query the current boundary indicator of the $f$th face of the cell). You can then use cell->face(f)->set_boundary_id() to set the boundary id to something different. You can take a look back at step-1 how iteration over the meshes is done there.

    +

    For example, you might want to apply a different boundary condition for the right boundary of the first grid in this program. To do this, iterate over the cells and their faces and identify the correct faces (for example using cell->center() to query the coordinates of the center of a cell as we do in step-1, or using cell->face(f)->get_boundary_id() to query the current boundary indicator of the $f$th face of the cell). You can then use cell->face(f)->set_boundary_id() to set the boundary id to something different. You can take a look back at step-1 how iteration over the meshes is done there.

    Extracting a boundary mesh

    Computations on manifolds, like they are done in step-38, require a surface mesh embedded into a higher dimensional space. While some can be constructed using the GridGenerator namespace or loaded from a file, it is sometimes useful to extract a surface mesh from a volume mesh.

    Use the function GridGenerator::extract_boundary_mesh() to extract the surface elements of a mesh. Using the function on a 3d mesh (a Triangulation<3,3>, for example from grid_4()), this will return a Triangulation<2,3> that you can use in step-38. Also try extracting the boundary mesh of a Triangulation<2,2>.

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html 2024-03-17 21:57:47.115256977 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html 2024-03-17 21:57:47.119257002 +0000 @@ -136,22 +136,22 @@ Regarding the mathematical side, we show how to support a variable coefficient in the elliptic operator and how to use preconditioned iterative solvers for the linear systems of equations.

    The equation to solve here is as follows:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   -\nabla \cdot a(\mathbf x) \nabla u(\mathbf x) &= 1 \qquad\qquad & \text{in}\ \Omega,
   \\
   u &= 0 \qquad\qquad & \text{on}\ \partial\Omega.
-\end{align*} +\end{align*}" src="form_5497.png"/>

    -

    If $a(\mathbf x)$ was a constant coefficient, this would simply be the Poisson equation. However, if it is indeed spatially variable, it is a more complex equation (often referred to as the "extended Poisson equation"). Depending on what the variable $u$ refers to it models a variety of situations with wide applicability:

    +

    If $a(\mathbf x)$ was a constant coefficient, this would simply be the Poisson equation. However, if it is indeed spatially variable, it is a more complex equation (often referred to as the "extended Poisson equation"). Depending on what the variable $u$ refers to it models a variety of situations with wide applicability:

    Since the Laplace/Poisson equation appears in so many contexts, there are many more interpretations than just the two listed above.

    When assembling the linear system for this equation, we need the weak form which here reads as follows:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   (a \nabla \varphi, \nabla u) &= (\varphi, 1) \qquad \qquad \forall \varphi.
-\end{align*} +\end{align*}" src="form_5499.png"/>

    The implementation in the assemble_system function follows immediately from this.

    The commented program

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html 2024-03-17 21:57:47.219257620 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html 2024-03-17 21:57:47.223257645 +0000 @@ -154,14 +154,14 @@ (\epsilon \nabla u, \nabla v) = (f,v) \quad \forall v \in V_h \end{align*}" src="form_5500.png"/>

    -

    on the domain $\Omega = [-1,1]^\text{dim} \setminus [0,1]^\text{dim}$ (an L-shaped domain for 2D and a Fichera corner for 3D) with $\epsilon = 1$ if $\min(x,y,z)>-\frac{1}{2}$ and $\epsilon = 100$ otherwise. In other words, $\epsilon$ is small along the edges or faces of the domain that run into the reentrant corner, as will be visible in the figure below.

    -

    The boundary conditions are $u=0$ on the whole boundary and the right-hand side is $f=1$. We use continuous $Q_2$ elements for the discrete finite element space $V_h$, and use a residual-based, cell-wise a posteriori error estimator $e(K) = e_{\text{cell}}(K) + e_{\text{face}}(K)$ from [karakashian2003posteriori] with

    +

    on the domain $\Omega = [-1,1]^\text{dim} \setminus [0,1]^\text{dim}$ (an L-shaped domain for 2D and a Fichera corner for 3D) with $\epsilon = 1$ if $\min(x,y,z)>-\frac{1}{2}$ and $\epsilon = 100$ otherwise. In other words, $\epsilon$ is small along the edges or faces of the domain that run into the reentrant corner, as will be visible in the figure below.

    +

    The boundary conditions are $u=0$ on the whole boundary and the right-hand side is $f=1$. We use continuous $Q_2$ elements for the discrete finite element space $V_h$, and use a residual-based, cell-wise a posteriori error estimator $e(K) = e_{\text{cell}}(K) + e_{\text{face}}(K)$ from [karakashian2003posteriori] with

    \begin{align*}
  e_{\text{cell}}(K) &= h^2 \| f + \epsilon \triangle u \|_K^2, \\
  e_{\text{face}}(K) &= \sum_F h_F \| \jump{ \epsilon \nabla u \cdot n } \|_F^2,
 \end{align*}

    -

    to adaptively refine the mesh. (This is a generalization of the Kelly error estimator used in the KellyErrorEstimator class that drives mesh refinement in most of the other tutorial programs.) The following figure visualizes the solution and refinement for 2D: In 3D, the solution looks similar (see below). On the left you can see the solution and on the right we show a slice for $x$ close to the center of the domain showing the adaptively refined mesh.

    +

    to adaptively refine the mesh. (This is a generalization of the Kelly error estimator used in the KellyErrorEstimator class that drives mesh refinement in most of the other tutorial programs.) The following figure visualizes the solution and refinement for 2D: In 3D, the solution looks similar (see below). On the left you can see the solution and on the right we show a slice for $x$ close to the center of the domain showing the adaptively refined mesh.

    @@ -170,41 +170,41 @@

    As mentioned above, the purpose of this program is to demonstrate the use of algebraic and geometric multigrid methods for this problem, and to do so for parallel computations. An important component of making algorithms scale to large parallel machines is ensuring that every processor has the same amount of work to do. (More precisely, what matters is that there are no small fraction of processors that have substantially more work than the rest since, if that were so, a large fraction of processors will sit idle waiting for the small fraction to finish. Conversely, a small fraction of processors having substantially less work is not a problem because the majority of processors continues to be productive and only the small fraction sits idle once finished with their work.)

    For the active mesh, we use the parallel::distributed::Triangulation class as done in step-40 which uses functionality in the external library p4est for the distribution of the active cells among processors. For the non-active cells in the multilevel hierarchy, deal.II implements what we will refer to as the "first-child rule" where, for each cell in the hierarchy, we recursively assign the parent of a cell to the owner of the first child cell. The following figures give an example of such a distribution. Here the left image represents the active cells for a sample 2D mesh partitioned using a space-filling curve (which is what p4est uses to partition cells); the center image gives the tree representation of the active mesh; and the right image gives the multilevel hierarchy of cells. The colors and numbers represent the different processors. The circular nodes in the tree are the non-active cells which are distributed using the "first-child rule".

    -

    Included among the output to screen in this example is a value "Partition efficiency" given by one over MGTools::workload_imbalance(). This value, which will be denoted by $\mathbb{E}$, quantifies the overhead produced by not having a perfect work balance on each level of the multigrid hierarchy. This imbalance is evident from the example above: while level $\ell=2$ is about as well balanced as is possible with four cells among three processors, the coarse level $\ell=0$ has work for only one processor, and level $\ell=1$ has work for only two processors of which one has three times as much work as the other.

    -

    For defining $\mathbb{E}$, it is important to note that, as we are using local smoothing to define the multigrid hierarchy (see the multigrid paper for a description of local smoothing), the refinement level of a cell corresponds to that cell's multigrid level. Now, let $N_{\ell}$ be the number of cells on level $\ell$ (both active and non-active cells) and $N_{\ell,p}$ be the subset owned by process $p$. We will also denote by $P$ the total number of processors. Assuming that the workload for any one processor is proportional to the number of cells owned by that processor, the optimal workload per processor is given by

    -\begin{align*}
+<p>Included among the output to screen in this example is a value MGTools::workload_imbalance(). This value, which will be denoted by $\mathbb{E}$, quantifies the overhead produced by not having a perfect work balance on each level of the multigrid hierarchy. This imbalance is evident from the example above: while level $\ell=2$ is about as well balanced as is possible with four cells among three processors, the coarse level $\ell=0$ has work for only one processor, and level $\ell=1$ has work for only two processors of which one has three times as much work as the other.

    +

    For defining $\mathbb{E}$, it is important to note that, as we are using local smoothing to define the multigrid hierarchy (see the multigrid paper for a description of local smoothing), the refinement level of a cell corresponds to that cell's multigrid level. Now, let $N_{\ell}$ be the number of cells on level $\ell$ (both active and non-active cells) and $N_{\ell,p}$ be the subset owned by process $p$. We will also denote by $P$ the total number of processors. Assuming that the workload for any one processor is proportional to the number of cells owned by that processor, the optimal workload per processor is given by

    +\begin{align*}
 W_{\text{opt}} = \frac1{P}\sum_{\ell} N_{\ell} = \sum_{\ell}\left(\frac1{P}\sum_{p}N_{\ell,p}\right).
-\end{align*} +\end{align*}" src="form_5515.png"/>

    Next, assuming a synchronization of work on each level (i.e., on each level of a V-cycle, work must be completed by all processors before moving on to the next level), the limiting effort on each level is given by

    -\begin{align*}
+<picture><source srcset=\begin{align*}
 W_\ell = \max_{p} N_{\ell,p},
-\end{align*} +\end{align*}" src="form_5516.png"/>

    and the total parallel complexity

    -\begin{align*}
+<picture><source srcset=\begin{align*}
 W = \sum_{\ell} W_\ell.
-\end{align*} +\end{align*}" src="form_5517.png"/>

    -

    Then we define $\mathbb{E}$ as a ratio of the optimal partition to the parallel complexity of the current partition

    -\begin{align*}
+<p> Then we define <picture><source srcset=$\mathbb{E}$ as a ratio of the optimal partition to the parallel complexity of the current partition

    +\begin{align*}
   \mathbb{E} = \frac{W_{\text{opt}}}{W}.
-\end{align*} +\end{align*}" src="form_5518.png"/>

    For the example distribution above, we have

    -\begin{align*}
+<picture><source srcset=\begin{align*}
 W_{\text{opt}}&=\frac{1}{P}\sum_{\ell} N_{\ell} = \frac{1}{3} \left(1+4+4\right)= 3 \qquad
 \\
 W &= \sum_\ell W_\ell = 1 + 2 + 3 = 6
 \\
 \mathbb{E} &= \frac{W_{\text{opt}}}{W} = \frac12.
-\end{align*} +\end{align*}" src="form_5519.png"/>

    -

    The value MGTools::workload_imbalance() $= 1/\mathbb{E}$ then represents the factor increase in timings we expect for GMG methods (vmults, assembly, etc.) due to the imbalance of the mesh partition compared to a perfectly load-balanced workload. We will report on these in the results section below for a sequence of meshes, and compare with the observed slow-downs as we go to larger and larger processor numbers (where, typically, the load imbalance becomes larger as well).

    -

    These sorts of considerations are considered in much greater detail in [clevenger_par_gmg], which contains a full discussion of the partition efficiency model and the effect the imbalance has on the GMG V-cycle timing. In summary, the value of $\mathbb{E}$ is highly dependent on the degree of local mesh refinement used and has an optimal value $\mathbb{E} \approx 1$ for globally refined meshes. Typically for adaptively refined meshes, the number of processors used to distribute a single mesh has a negative impact on $\mathbb{E}$ but only up to a leveling off point, where the imbalance remains relatively constant for an increasing number of processors, and further refinement has very little impact on $\mathbb{E}$. Finally, $1/\mathbb{E}$ was shown to give an accurate representation of the slowdown in parallel scaling expected for the timing of a V-cycle.

    +

    The value MGTools::workload_imbalance() $= 1/\mathbb{E}$ then represents the factor increase in timings we expect for GMG methods (vmults, assembly, etc.) due to the imbalance of the mesh partition compared to a perfectly load-balanced workload. We will report on these in the results section below for a sequence of meshes, and compare with the observed slow-downs as we go to larger and larger processor numbers (where, typically, the load imbalance becomes larger as well).

    +

    These sorts of considerations are considered in much greater detail in [clevenger_par_gmg], which contains a full discussion of the partition efficiency model and the effect the imbalance has on the GMG V-cycle timing. In summary, the value of $\mathbb{E}$ is highly dependent on the degree of local mesh refinement used and has an optimal value $\mathbb{E} \approx 1$ for globally refined meshes. Typically for adaptively refined meshes, the number of processors used to distribute a single mesh has a negative impact on $\mathbb{E}$ but only up to a leveling off point, where the imbalance remains relatively constant for an increasing number of processors, and further refinement has very little impact on $\mathbb{E}$. Finally, $1/\mathbb{E}$ was shown to give an accurate representation of the slowdown in parallel scaling expected for the timing of a V-cycle.

    It should be noted that there is potential for some asynchronous work between multigrid levels, specifically with purely nearest neighbor MPI communication, and an adaptive mesh could be constructed such that the efficiency model would far overestimate the V-cycle slowdown due to the asynchronous work "covering up" the imbalance (which assumes synchronization over levels). However, for most realistic adaptive meshes the expectation is that this asynchronous work will only cover up a very small portion of the imbalance and the efficiency model will describe the slowdown very well.

    Workload imbalance for algebraic multigrid methods

    -

    The considerations above show that one has to expect certain limits on the scalability of the geometric multigrid algorithm as it is implemented in deal.II because even in cases where the finest levels of a mesh are perfectly load balanced, the coarser levels may not be. At the same time, the coarser levels are weighted less (the contributions of $W_\ell$ to $W$ are small) because coarser levels have fewer cells and, consequently, do not contribute to the overall run time as much as finer levels. In other words, imbalances in the coarser levels may not lead to large effects in the big picture.

    +

    The considerations above show that one has to expect certain limits on the scalability of the geometric multigrid algorithm as it is implemented in deal.II because even in cases where the finest levels of a mesh are perfectly load balanced, the coarser levels may not be. At the same time, the coarser levels are weighted less (the contributions of $W_\ell$ to $W$ are small) because coarser levels have fewer cells and, consequently, do not contribute to the overall run time as much as finer levels. In other words, imbalances in the coarser levels may not lead to large effects in the big picture.

    Algebraic multigrid methods are of course based on an entirely different approach to creating a hierarchy of levels. In particular, they create these purely based on analyzing the system matrix, and very sophisticated algorithms for ensuring that the problem is well load-balanced on every level are implemented in both the hypre and ML/MueLu packages that underly the TrilinosWrappers::PreconditionAMG and PETScWrappers::PreconditionBoomerAMG classes. In some sense, these algorithms are simpler than for geometric multigrid methods because they only deal with the matrix itself, rather than all of the connotations of meshes, neighbors, parents, and other geometric entities. At the same time, much work has also been put into making algebraic multigrid methods scale to very large problems, including questions such as reducing the number of processors that work on a given level of the hierarchy to a subset of all processors, if otherwise processors would spend less time on computations than on communication. (One might note that it is of course possible to implement these same kinds of ideas also in geometric multigrid algorithms where one purposefully idles some processors on coarser levels to reduce the amount of communication. deal.II just doesn't do this at this time.)

    These are not considerations we typically have to worry about here, however: For most purposes, we use algebraic multigrid methods as black-box methods.

    Running the program

    @@ -1079,9 +1079,9 @@

    The result is a function that is similar to the one found in the "Use FEEvaluation::read_dof_values_plain() to avoid resolving constraints" subsection in the "Possibilities for extensions" section of step-37.

    -

    The reason for this function is that the MatrixFree operators do not take into account non-homogeneous Dirichlet constraints, instead treating all Dirichlet constraints as homogeneous. To account for this, the right-hand side here is assembled as the residual $r_0 = f-Au_0$, where $u_0$ is a zero vector except in the Dirichlet values. Then when solving, we have that the solution is $u = u_0 + A^{-1}r_0$. This can be seen as a Newton iteration on a linear system with initial guess $u_0$. The CG solve in the solve() function below computes $A^{-1}r_0$ and the call to constraints.distribute() (which directly follows) adds the $u_0$.

    +

    The reason for this function is that the MatrixFree operators do not take into account non-homogeneous Dirichlet constraints, instead treating all Dirichlet constraints as homogeneous. To account for this, the right-hand side here is assembled as the residual $r_0 = f-Au_0$, where $u_0$ is a zero vector except in the Dirichlet values. Then when solving, we have that the solution is $u = u_0 + A^{-1}r_0$. This can be seen as a Newton iteration on a linear system with initial guess $u_0$. The CG solve in the solve() function below computes $A^{-1}r_0$ and the call to constraints.distribute() (which directly follows) adds the $u_0$.

    Obviously, since we are considering a problem with zero Dirichlet boundary, we could have taken a similar approach to step-37 assemble_rhs(), but this additional work allows us to change the problem declaration if we so choose.

    -

    This function has two parts in the integration loop: applying the negative of matrix $A$ to $u_0$ by submitting the negative of the gradient, and adding the right-hand side contribution by submitting the value $f$. We must be sure to use read_dof_values_plain() for evaluating $u_0$ as read_dof_values() would set all Dirichlet values to zero.

    +

    This function has two parts in the integration loop: applying the negative of matrix $A$ to $u_0$ by submitting the negative of the gradient, and adding the right-hand side contribution by submitting the value $f$. We must be sure to use read_dof_values_plain() for evaluating $u_0$ as read_dof_values() would set all Dirichlet values to zero.

    Finally, the system_rhs vector is of type LA::MPI::Vector, but the MatrixFree class only work for LinearAlgebra::distributed::Vector. Therefore we must compute the right-hand side using MatrixFree functionality and then use the functions in the ChangeVectorType namespace to copy it to the correct type.

      template <int dim, int degree>
      void LaplaceProblem<dim, degree>::assemble_rhs()
    @@ -1452,7 +1452,7 @@
    typename ActiveSelector::active_cell_iterator active_cell_iterator
    UpdateFlags
    -

    Assembler for cell residual $h^2 \| f + \epsilon \triangle u \|_K^2$

    +

    Assembler for cell residual $h^2 \| f + \epsilon \triangle u \|_K^2$

      auto cell_worker = [&](const Iterator & cell,
      ScratchData<dim> &scratch_data,
      CopyData & copy_data) {
    @@ -1482,8 +1482,8 @@
     
    void reinit(const TriaIterator< DoFCellAccessor< dim, spacedim, level_dof_access > > &cell)
    DEAL_II_HOST constexpr Number trace(const SymmetricTensor< 2, dim2, Number > &)
    -

    Assembler for face term $\sum_F h_F \| \jump{\epsilon \nabla u \cdot n}
-   \|_F^2$

    +

    Assembler for face term $\sum_F h_F \| \jump{\epsilon \nabla u \cdot n}
+   \|_F^2$

      auto face_worker = [&](const Iterator & cell,
      const unsigned int &f,
      const unsigned int &sf,
    @@ -1860,12 +1860,12 @@

    Here, the timing of the solve() function is split up in 3 parts: setting up the multigrid preconditioner, execution of a single multigrid V-cycle, and the CG solver. The V-cycle that is timed is unnecessary for the overall solve and only meant to give an insight at the different costs for AMG and GMG. Also it should be noted that when using the AMG solver, "Workload imbalance" is not included in the output since the hierarchy of coarse meshes is not required.

    All results in this section are gathered on Intel Xeon Platinum 8280 (Cascade Lake) nodes which have 56 cores and 192GB per node and support AVX-512 instructions, allowing for vectorization over 8 doubles (vectorization used only in the matrix-free computations). The code is compiled using gcc 7.1.0 with intel-mpi 17.0.3. Trilinos 12.10.1 is used for the matrix-based GMG/AMG computations.

    We can then gather a variety of information by calling the program with the input files that are provided in the directory in which step-50 is located. Using these, and adjusting the number of mesh refinement steps, we can produce information about how well the program scales.

    -

    The following table gives weak scaling timings for this program on up to 256M DoFs and 7,168 processors. (Recall that weak scaling keeps the number of degrees of freedom per processor constant while increasing the number of processors; i.e., it considers larger and larger problems.) Here, $\mathbb{E}$ is the partition efficiency from the introduction (also equal to 1.0/workload imbalance), "Setup" is a combination of setup, setup multigrid, assemble, and assemble multigrid from the timing blocks, and "Prec" is the preconditioner setup. Ideally all times would stay constant over each problem size for the individual solvers, but since the partition efficiency decreases from 0.371 to 0.161 from largest to smallest problem size, we expect to see an approximately $0.371/0.161=2.3$ times increase in timings for GMG. This is, in fact, pretty close to what we really get:

    +

    The following table gives weak scaling timings for this program on up to 256M DoFs and 7,168 processors. (Recall that weak scaling keeps the number of degrees of freedom per processor constant while increasing the number of processors; i.e., it considers larger and larger problems.) Here, $\mathbb{E}$ is the partition efficiency from the introduction (also equal to 1.0/workload imbalance), "Setup" is a combination of setup, setup multigrid, assemble, and assemble multigrid from the timing blocks, and "Prec" is the preconditioner setup. Ideally all times would stay constant over each problem size for the individual solvers, but since the partition efficiency decreases from 0.371 to 0.161 from largest to smallest problem size, we expect to see an approximately $0.371/0.161=2.3$ times increase in timings for GMG. This is, in fact, pretty close to what we really get:

    - + @@ -1875,8 +1875,8 @@
    MF-GMG MB-GMG AMG
    Procs Cycle DoFs $\mathbb{E}$ Setup Prec Solve Total Setup Prec Solve Total Setup Prec Solve Total
    Procs Cycle DoFs $\mathbb{E}$ Setup Prec Solve Total Setup Prec Solve Total Setup Prec Solve Total
    112 13 4M 0.37 0.742 0.393 0.200 1.335 1.714 2.934 0.716 5.364 1.544 0.456 1.150 3.150
    7,168 19 256M 0.16 1.214 0.893 0.521 2.628 2.386 7.260 2.560 12.206 1.844 1.010 1.890 4.744
    -

    On the other hand, the algebraic multigrid in the last set of columns is relatively unaffected by the increasing imbalance of the mesh hierarchy (because it doesn't use the mesh hierarchy) and the growth in time is rather driven by other factors that are well documented in the literature (most notably that the algorithmic complexity of some parts of algebraic multigrid methods appears to be ${\cal O}(N
-\log N)$ instead of ${\cal O}(N)$ for geometric multigrid).

    +

    On the other hand, the algebraic multigrid in the last set of columns is relatively unaffected by the increasing imbalance of the mesh hierarchy (because it doesn't use the mesh hierarchy) and the growth in time is rather driven by other factors that are well documented in the literature (most notably that the algorithmic complexity of some parts of algebraic multigrid methods appears to be ${\cal O}(N
+\log N)$ instead of ${\cal O}(N)$ for geometric multigrid).

    The upshort of the table above is that the matrix-free geometric multigrid method appears to be the fastest approach to solving this equation if not by a huge margin. Matrix-based methods, on the other hand, are consistently the worst.

    The following figure provides strong scaling results for each method, i.e., we solve the same problem on more and more processors. Specifically, we consider the problems after 16 mesh refinement cycles (32M DoFs) and 19 cycles (256M DoFs), on between 56 to 28,672 processors:

    @@ -1886,7 +1886,7 @@

    The finite element degree is currently hard-coded as 2, see the template arguments of the main class. It is easy to change. To test, it would be interesting to switch to a test problem with a reference solution. This way, you can compare error rates.

    Coarse solver

    A more interesting example would involve a more complicated coarse mesh (see step-49 for inspiration). The issue in that case is that the coarsest level of the mesh hierarchy is actually quite large, and one would have to think about ways to solve the coarse level problem efficiently. (This is not an issue for algebraic multigrid methods because they would just continue to build coarser and coarser levels of the matrix, regardless of their geometric origin.)

    -

    In the program here, we simply solve the coarse level problem with a Conjugate Gradient method without any preconditioner. That is acceptable if the coarse problem is really small – for example, if the coarse mesh had a single cell, then the coarse mesh problems has a $9\times 9$ matrix in 2d, and a $27\times 27$ matrix in 3d; for the coarse mesh we use on the $L$-shaped domain of the current program, these sizes are $21\times 21$ in 2d and $117\times 117$ in 3d. But if the coarse mesh consists of hundreds or thousands of cells, this approach will no longer work and might start to dominate the overall run-time of each V-cycle. A common approach is then to solve the coarse mesh problem using an algebraic multigrid preconditioner; this would then, however, require assembling the coarse matrix (even for the matrix-free version) as input to the AMG implementation.

    +

    In the program here, we simply solve the coarse level problem with a Conjugate Gradient method without any preconditioner. That is acceptable if the coarse problem is really small – for example, if the coarse mesh had a single cell, then the coarse mesh problems has a $9\times 9$ matrix in 2d, and a $27\times 27$ matrix in 3d; for the coarse mesh we use on the $L$-shaped domain of the current program, these sizes are $21\times 21$ in 2d and $117\times 117$ in 3d. But if the coarse mesh consists of hundreds or thousands of cells, this approach will no longer work and might start to dominate the overall run-time of each V-cycle. A common approach is then to solve the coarse mesh problem using an algebraic multigrid preconditioner; this would then, however, require assembling the coarse matrix (even for the matrix-free version) as input to the AMG implementation.

    The plain program

    /* ---------------------------------------------------------------------
    *
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html 2024-03-17 21:57:47.327258287 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html 2024-03-17 21:57:47.327258287 +0000 @@ -157,7 +157,7 @@

    Introduction

    This tutorial program presents the implementation of a hybridizable discontinuous Galkerin method for the convection-diffusion equation.

    Hybridizable discontinuous Galerkin methods

    -

    One common argument against the use of discontinuous Galerkin elements is the large number of globally coupled degrees of freedom that one must solve in an implicit system. This is because, unlike continuous finite elements, in typical discontinuous elements there is one degree of freedom at each vertex for each of the adjacent elements, rather than just one, and similarly for edges and faces. As an example of how fast the number of unknowns grows, consider the FE_DGPMonomial basis: each scalar solution component is represented by polynomials of degree $p$ with $(1/\text{dim}!) \prod_{i=1}^{\text{dim}}(p+i)$ degrees of freedom per element. Typically, all degrees of freedom in an element are coupled to all of the degrees of freedom in the adjacent elements. The resulting discrete equations yield very large linear systems very quickly, especially for systems of equations in 2 or 3 dimensions.

    +

    One common argument against the use of discontinuous Galerkin elements is the large number of globally coupled degrees of freedom that one must solve in an implicit system. This is because, unlike continuous finite elements, in typical discontinuous elements there is one degree of freedom at each vertex for each of the adjacent elements, rather than just one, and similarly for edges and faces. As an example of how fast the number of unknowns grows, consider the FE_DGPMonomial basis: each scalar solution component is represented by polynomials of degree $p$ with $(1/\text{dim}!) \prod_{i=1}^{\text{dim}}(p+i)$ degrees of freedom per element. Typically, all degrees of freedom in an element are coupled to all of the degrees of freedom in the adjacent elements. The resulting discrete equations yield very large linear systems very quickly, especially for systems of equations in 2 or 3 dimensions.

    Reducing the size of the linear system

    To alleviate the computational cost of solving such large linear systems, the hybridizable discontinuous Galerkin (HDG) methodology was introduced by Cockburn and co-workers (see the references in the recent HDG overview article by Nguyen and Peraire [Ngu2012]).

    The HDG method achieves this goal by formulating the mathematical problem using Dirichlet-to-Neumann mappings. The partial differential equations are first written as a first order system, and each field is then discretized via a DG method. At this point, the single-valued "trace" values on the skeleton of the mesh, i.e., element faces, are taken to be independent unknown quantities. This yields unknowns in the discrete formulation that fall into two categories:

    The Diffusion class

    -

    The next piece is the declaration of the main class. Most of the functions in this class are not new and have been explained in previous tutorials. The only interesting functions are evaluate_diffusion() and id_minus_tau_J_inverse(). evaluate_diffusion() evaluates the diffusion equation, $M^{-1}(f(t,y))$, at a given time and a given $y$. id_minus_tau_J_inverse() evaluates $\left(I-\tau
-   M^{-1} \frac{\partial f(t,y)}{\partial y}\right)^{-1}$ or equivalently $\left(M-\tau \frac{\partial f}{\partial y}\right)^{-1} M$ at a given time, for a given $\tau$ and $y$. This function is needed when an implicit method is used.

    +

    The next piece is the declaration of the main class. Most of the functions in this class are not new and have been explained in previous tutorials. The only interesting functions are evaluate_diffusion() and id_minus_tau_J_inverse(). evaluate_diffusion() evaluates the diffusion equation, $M^{-1}(f(t,y))$, at a given time and a given $y$. id_minus_tau_J_inverse() evaluates $\left(I-\tau
+   M^{-1} \frac{\partial f(t,y)}{\partial y}\right)^{-1}$ or equivalently $\left(M-\tau \frac{\partial f}{\partial y}\right)^{-1} M$ at a given time, for a given $\tau$ and $y$. This function is needed when an implicit method is used.

      class Diffusion
      {
      public:
    @@ -424,8 +424,8 @@
    void make_sparsity_pattern(const DoFHandler< dim, spacedim > &dof_handler, SparsityPatternBase &sparsity_pattern, const AffineConstraints< number > &constraints=AffineConstraints< number >(), const bool keep_constrained_dofs=true, const types::subdomain_id subdomain_id=numbers::invalid_subdomain_id)
    void interpolate_boundary_values(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > * > &function_map, std::map< types::global_dof_index, number > &boundary_values, const ComponentMask &component_mask=ComponentMask())

    Diffusion::assemble_system

    -

    In this function, we compute $-\int D \nabla b_i \cdot \nabla b_j
-   d\boldsymbol{r} - \int \Sigma_a b_i b_j d\boldsymbol{r}$ and the mass matrix $\int b_i b_j d\boldsymbol{r}$. The mass matrix is then inverted using a direct solver; the inverse_mass_matrix variable will then store the inverse of the mass matrix so that $M^{-1}$ can be applied to a vector using the vmult() function of that object. (Internally, UMFPACK does not really store the inverse of the matrix, but its LU factors; applying the inverse matrix is then equivalent to doing one forward and one backward solves with these two factors, which has the same complexity as applying an explicit inverse of the matrix).

    +

    In this function, we compute $-\int D \nabla b_i \cdot \nabla b_j
+   d\boldsymbol{r} - \int \Sigma_a b_i b_j d\boldsymbol{r}$ and the mass matrix $\int b_i b_j d\boldsymbol{r}$. The mass matrix is then inverted using a direct solver; the inverse_mass_matrix variable will then store the inverse of the mass matrix so that $M^{-1}$ can be applied to a vector using the vmult() function of that object. (Internally, UMFPACK does not really store the inverse of the matrix, but its LU factors; applying the inverse matrix is then equivalent to doing one forward and one backward solves with these two factors, which has the same complexity as applying an explicit inverse of the matrix).

      void Diffusion::assemble_system()
      {
      system_matrix = 0.;
    @@ -513,8 +513,8 @@
    ::VectorizedArray< Number, width > cos(const ::VectorizedArray< Number, width > &)
    ::VectorizedArray< Number, width > sin(const ::VectorizedArray< Number, width > &)

    Diffusion::evaluate_diffusion

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html 2024-03-17 21:57:47.455259078 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html 2024-03-17 21:57:47.455259078 +0000 @@ -142,18 +142,18 @@

    To illustrate how one describes geometries using charts in deal.II, we will consider a case that originates in an application of the ASPECT mantle convection code, using a data set provided by D. Sarah Stamps. In the concrete application, we were interested in describing flow in the Earth mantle under the East African Rift, a zone where two continental plates drift apart. Not to beat around the bush, the geometry we want to describe looks like this:

    In particular, though you cannot see this here, the top surface is not just colored by the elevation but is, in fact, deformed to follow the correct topography. While the actual application is not relevant here, the geometry is. The domain we are interested in is a part of the Earth that ranges from the surface to a depth of 500km, from 26 to 35 degrees East of the Greenwich meridian, and from 5 degrees North of the equator to 10 degrees South.

    -

    This description of the geometry suggests to start with a box $\hat U=[26,35]\times[-10,5]\times[-500000,0]$ (measured in degrees, degrees, and meters) and to provide a map $\varphi$ so that $\varphi^{-1}(\hat U)=\Omega$ where $\Omega$ is the domain we seek. $(\Omega,\varphi)$ is then a chart, $\varphi$ the pull-back operator, and $\varphi^{-1}$ the push-forward operator. If we need a point $q$ that is the "average" of other points $q_i\in\Omega$, the ChartManifold class then first applies the pull-back to obtain $\hat q_i=\varphi(q_i)$, averages these to a point $\hat p$ and then computes $p=\varphi^{-1}(\hat p)$.

    -

    Our goal here is therefore to implement a class that describes $\varphi$ and $\varphi^{-1}$. If Earth was a sphere, then this would not be difficult: if we denote by $(\hat \phi,\hat \theta,\hat d)$ the points of $\hat U$ (i.e., longitude counted eastward, latitude counted northward, and elevation relative to zero depth), then

    -\[
+<p>This description of the geometry suggests to start with a box <picture><source srcset=$\hat U=[26,35]\times[-10,5]\times[-500000,0]$ (measured in degrees, degrees, and meters) and to provide a map $\varphi$ so that $\varphi^{-1}(\hat U)=\Omega$ where $\Omega$ is the domain we seek. $(\Omega,\varphi)$ is then a chart, $\varphi$ the pull-back operator, and $\varphi^{-1}$ the push-forward operator. If we need a point $q$ that is the "average" of other points $q_i\in\Omega$, the ChartManifold class then first applies the pull-back to obtain $\hat q_i=\varphi(q_i)$, averages these to a point $\hat p$ and then computes $p=\varphi^{-1}(\hat p)$.

    +

    Our goal here is therefore to implement a class that describes $\varphi$ and $\varphi^{-1}$. If Earth was a sphere, then this would not be difficult: if we denote by $(\hat \phi,\hat \theta,\hat d)$ the points of $\hat U$ (i.e., longitude counted eastward, latitude counted northward, and elevation relative to zero depth), then

    +\[
   \mathbf x = \varphi^{-1}(\hat \phi,\hat \theta,\hat d)
   = (R+\hat d) (\cos\hat \phi\cos\hat \theta, \sin\hat \phi\cos\hat \theta, \sin\hat \theta)^T
-\] +\]" src="form_5667.png"/>

    -

    provides coordinates in a Cartesian coordinate system, where $R$ is the radius of the sphere. However, the Earth is not a sphere:

    +

    provides coordinates in a Cartesian coordinate system, where $R$ is the radius of the sphere. However, the Earth is not a sphere:

    1. It is flattened at the poles and larger at the equator: the semi-major axis is approximately 22km longer than the semi-minor axis. We will account for this using the WGS 84 reference standard for the Earth shape. The formula used in WGS 84 to obtain a position in Cartesian coordinates from longitude, latitude, and elevation is

      -\[
+<picture><source srcset=\[
   \mathbf x = \varphi_\text{WGS84}^{-1}(\phi,\theta,d)
   = \left(
     \begin{array}{c}
@@ -162,14 +162,14 @@
      ((1-e^2)\bar R(\theta)+d) \sin\theta
     \end{array}
     \right),
-\] +\]" src="form_5668.png"/>

      -

      where $\bar R(\theta)=\frac{R}{\sqrt{1-(e \sin\theta)^2}}$, and radius and ellipticity are given by $R=6378137\text{m}, e=0.081819190842622$. In this formula, we assume that the arguments to sines and cosines are evaluated in degree, not radians (though we will have to change this assumption in the code).

      +

      where $\bar R(\theta)=\frac{R}{\sqrt{1-(e \sin\theta)^2}}$, and radius and ellipticity are given by $R=6378137\text{m}, e=0.081819190842622$. In this formula, we assume that the arguments to sines and cosines are evaluated in degree, not radians (though we will have to change this assumption in the code).

    2. -It has topography in the form of mountains and valleys. We will account for this using real topography data (see below for a description of where this data comes from). Using this data set, we can look up elevations on a latitude-longitude mesh laid over the surface of the Earth. Starting with the box $\hat U=[26,35]\times[-10,5]\times[-500000,0]$, we will therefore first stretch it in vertical direction before handing it off to the WGS 84 function: if $h(\hat\phi,\hat\theta)$ is the height at longitude $\hat\phi$ and latitude $\hat\theta$, then we define

      -\[
+It has topography in the form of mountains and valleys. We will account for this using real topography data (see below for a description of where this data comes from). Using this data set, we can look up elevations on a latitude-longitude mesh laid over the surface of the Earth. Starting with the box <picture><source srcset=$\hat U=[26,35]\times[-10,5]\times[-500000,0]$, we will therefore first stretch it in vertical direction before handing it off to the WGS 84 function: if $h(\hat\phi,\hat\theta)$ is the height at longitude $\hat\phi$ and latitude $\hat\theta$, then we define

      +\[
   (\phi,\theta,d) =
   \varphi_\text{topo}^{-1}(\hat\phi,\hat\theta,\hat d)
   = \left(
@@ -177,30 +177,30 @@
       \hat\theta,
       \hat d + \frac{\hat d+500000}{500000}h(\hat\phi,\hat\theta)
     \right).
-\] +\]" src="form_5674.png"/>

      - Using this function, the top surface of the box $\hat U$ is displaced to the correct topography, the bottom surface remains where it was, and points in between are linearly interpolated.
    3. + Using this function, the top surface of the box $\hat U$ is displaced to the correct topography, the bottom surface remains where it was, and points in between are linearly interpolated.
    -

    Using these two functions, we can then define the entire push-forward function $\varphi^{-1}: \hat U \rightarrow \Omega$ as

    -\[
+<p>Using these two functions, we can then define the entire push-forward function <picture><source srcset=$\varphi^{-1}: \hat U \rightarrow \Omega$ as

    +\[
   \mathbf x
   =
   \varphi^{-1}(\hat\phi,\hat\theta,\hat d)
   =
   \varphi_\text{WGS84}^{-1}(\varphi_\text{topo}^{-1}(\hat\phi,\hat\theta,\hat d)).
-\] +\]" src="form_5676.png"/>

    In addition, we will have to define the inverse of this function, the pull-back operation, which we can write as

    -\[
+<picture><source srcset=\[
   (\hat\phi,\hat\theta,\hat d)
   =
   \varphi(\mathbf x)
   =
   \varphi_\text{topo}(\varphi_\text{WGS84}(\mathbf x)).
-\] +\]" src="form_5677.png"/>

    We can obtain one of the components of this function by inverting the formula above:

    -\[
+<picture><source srcset=\[
   (\hat\phi,\hat\theta,\hat d) =
   \varphi_\text{topo}(\phi,\theta,d)
   = \left(
@@ -208,11 +208,11 @@
       \theta,
       500000\frac{d-h(\phi,\theta)}{500000+h(\phi,\theta)}
     \right).
-\] +\]" src="form_5678.png"/>

    -

    Computing $\varphi_\text{WGS84}(\mathbf x)$ is also possible though a lot more awkward. We won't show the formula here but instead only provide the implementation in the program.

    +

    Computing $\varphi_\text{WGS84}(\mathbf x)$ is also possible though a lot more awkward. We won't show the formula here but instead only provide the implementation in the program.

    Implementation

    -

    There are a number of issues we need to address in the program. At the largest scale, we need to write a class that implements the interface of ChartManifold. This involves a function push_forward() that takes a point in the reference domain $\hat U$ and transform it into real space using the function $\varphi^{-1}$ outlined above, and its inverse function pull_back() implementing $\varphi$. We will do so in the AfricaGeometry class below that looks, in essence, like this:

    class AfricaGeometry : public ChartManifold<3,3>
    +

    There are a number of issues we need to address in the program. At the largest scale, we need to write a class that implements the interface of ChartManifold. This involves a function push_forward() that takes a point in the reference domain $\hat U$ and transform it into real space using the function $\varphi^{-1}$ outlined above, and its inverse function pull_back() implementing $\varphi$. We will do so in the AfricaGeometry class below that looks, in essence, like this:

    class AfricaGeometry : public ChartManifold<3,3>
    {
    public:
    virtual
    @@ -231,7 +231,7 @@
    virtual Point< chartdim > pull_back(const Point< spacedim > &space_point) const =0
    Definition point.h:112

    The transformations above have two parts: the WGS 84 transformations and the topography transformation. Consequently, the AfricaGeometry class will have additional (non-virtual) member functions AfricaGeometry::push_forward_wgs84() and AfricaGeometry::push_forward_topo() that implement these two pieces, and corresponding pull back functions.

    -

    The WGS 84 transformation functions are not particularly interesting (even though the formulas they implement are impressive). The more interesting part is the topography transformation. Recall that for this, we needed to evaluate the elevation function $h(\hat\phi,\hat\theta)$. There is of course no formula for this: Earth is what it is, the best one can do is look up the altitude from some table. This is, in fact what we will do.

    +

    The WGS 84 transformation functions are not particularly interesting (even though the formulas they implement are impressive). The more interesting part is the topography transformation. Recall that for this, we needed to evaluate the elevation function $h(\hat\phi,\hat\theta)$. There is of course no formula for this: Earth is what it is, the best one can do is look up the altitude from some table. This is, in fact what we will do.

    The data we use was originally created by the Shuttle Radar Topography Mission, was downloaded from the US Geologic Survey (USGS) and processed by D. Sarah Stamps who also wrote the initial version of the WGS 84 transformation functions. The topography data so processed is stored in a file topography.txt.gz that, when unpacked looks like this:

    6.983333 25.000000 700
    6.983333 25.016667 692
    6.983333 25.033333 701
    @@ -243,12 +243,12 @@
    -11.983333 35.966667 687
    -11.983333 35.983333 659

    The data is formatted as latitude longitude elevation where the first two columns are provided in degrees North of the equator and degrees East of the Greenwich meridian. The final column is given in meters above the WGS 84 zero elevation.

    -

    In the transformation functions, we need to evaluate $h(\hat\phi,\hat\theta)$ for a given longitude $\hat\phi$ and latitude $\hat\theta$. In general, this data point will not be available and we will have to interpolate between adjacent data points. Writing such an interpolation routine is not particularly difficult, but it is a bit tedious and error prone. Fortunately, we can somehow shoehorn this data set into an existing class: Functions::InterpolatedUniformGridData . Unfortunately, the class does not fit the bill quite exactly and so we need to work around it a bit. The problem comes from the way we initialize this class: in its simplest form, it takes a stream of values that it assumes form an equispaced mesh in the $x-y$ plane (or, here, the $\phi-\theta$ plane). Which is what they do here, sort of: they are ordered latitude first, longitude second; and more awkwardly, the first column starts at the largest values and counts down, rather than the usual other way around.

    +

    In the transformation functions, we need to evaluate $h(\hat\phi,\hat\theta)$ for a given longitude $\hat\phi$ and latitude $\hat\theta$. In general, this data point will not be available and we will have to interpolate between adjacent data points. Writing such an interpolation routine is not particularly difficult, but it is a bit tedious and error prone. Fortunately, we can somehow shoehorn this data set into an existing class: Functions::InterpolatedUniformGridData . Unfortunately, the class does not fit the bill quite exactly and so we need to work around it a bit. The problem comes from the way we initialize this class: in its simplest form, it takes a stream of values that it assumes form an equispaced mesh in the $x-y$ plane (or, here, the $\phi-\theta$ plane). Which is what they do here, sort of: they are ordered latitude first, longitude second; and more awkwardly, the first column starts at the largest values and counts down, rather than the usual other way around.

    Now, while tutorial programs are meant to illustrate how to code with deal.II, they do not necessarily have to satisfy the same quality standards as one would have to do with production codes. In a production code, we would write a function that reads the data and (i) automatically determines the extents of the first and second column, (ii) automatically determines the number of data points in each direction, (iii) does the interpolation regardless of the order in which data is arranged, if necessary by switching the order between reading and presenting it to the Functions::InterpolatedUniformGridData class.

    On the other hand, tutorial programs are best if they are short and demonstrate key points rather than dwell on unimportant aspects and, thereby, obscure what we really want to show. Consequently, we will allow ourselves a bit of leeway:

    All of this then calls for a class that essentially looks like this:

    class AfricaTopography
    {
    @@ -269,7 +269,7 @@
    };
    static constexpr double PI
    Definition numbers.h:259
    -

    Note how the value() function negates the latitude. It also switches from the format $\phi,\theta$ that we use everywhere else to the latitude-longitude format used in the table. Finally, it takes its arguments in radians as that is what we do everywhere else in the program, but then converts them to the degree-based system used for table lookup. As you will see in the implementation below, the function has a few more (static) member functions that we will call in the initialization of the topography_data member variable: the class type of this variable has a constructor that allows us to set everything right at construction time, rather than having to fill data later on, but this constructor takes a number of objects that can't be constructed in-place (at least not in C++98). Consequently, the construction of each of the objects we want to pass in the initialization happens in a number of static member functions.

    +

    Note how the value() function negates the latitude. It also switches from the format $\phi,\theta$ that we use everywhere else to the latitude-longitude format used in the table. Finally, it takes its arguments in radians as that is what we do everywhere else in the program, but then converts them to the degree-based system used for table lookup. As you will see in the implementation below, the function has a few more (static) member functions that we will call in the initialization of the topography_data member variable: the class type of this variable has a constructor that allows us to set everything right at construction time, rather than having to fill data later on, but this constructor takes a number of objects that can't be constructed in-place (at least not in C++98). Consequently, the construction of each of the objects we want to pass in the initialization happens in a number of static member functions.

    Having discussed the general outline of how we want to implement things, let us go to the program and show how it is done in practice.

    The commented program

    Let us start with the include files we need here. Obviously, we need the ones that describe the triangulation (tria.h), and that allow us to create and output triangulations (grid_generator.h and grid_out.h). Furthermore, we need the header file that declares the Manifold and ChartManifold classes that we will need to describe the geometry (manifold.h). We will then also need the GridTools::transform() function from the last of the following header files; the purpose for this function will become discussed at the point where we use it.

    @@ -299,7 +299,7 @@
     
    dealii
    Definition namespace_dealii.h:26

    Describing topography: AfricaTopography

    -

    The first significant part of this program is the class that describes the topography $h(\hat phi,\hat \theta)$ as a function of longitude and latitude. As discussed in the introduction, we will make our life a bit easier here by not writing the class in the most general way possible but by only writing it for the particular purpose we are interested in here: interpolating data obtained from one very specific data file that contains information about a particular area of the world for which we know the extents.

    +

    The first significant part of this program is the class that describes the topography $h(\hat phi,\hat \theta)$ as a function of longitude and latitude. As discussed in the introduction, we will make our life a bit easier here by not writing the class in the most general way possible but by only writing it for the particular purpose we are interested in here: interpolating data obtained from one very specific data file that contains information about a particular area of the world for which we know the extents.

    The general layout of the class has been discussed already above. Following is its declaration, including three static member functions that we will need in initializing the topography_data member variable.

      class AfricaTopography
      {
    @@ -315,7 +315,7 @@
      };
     
     
    -

    Let us move to the implementation of the class. The interesting parts of the class are the constructor and the value() function. The former initializes the Functions::InterpolatedUniformGridData member variable and we will use the constructor that requires us to pass in the end points of the 2-dimensional data set we want to interpolate (which are here given by the intervals $[-6.983333, 11.98333]$, using the trick of switching end points discussed in the introduction, and $[25, 35.983333]$, both given in degrees), the number of intervals into which the data is split (379 in latitude direction and 219 in longitude direction, for a total of $380\times 220$ data points), and a Table object that contains the data. The data then of course has size $380\times 220$ and we initialize it by providing an iterator to the first of the 83,600 elements of a std::vector object returned by the get_data() function below. Note that all of the member functions we call here are static because (i) they do not access any member variables of the class, and (ii) because they are called at a time when the object is not initialized fully anyway.

    +

    Let us move to the implementation of the class. The interesting parts of the class are the constructor and the value() function. The former initializes the Functions::InterpolatedUniformGridData member variable and we will use the constructor that requires us to pass in the end points of the 2-dimensional data set we want to interpolate (which are here given by the intervals $[-6.983333, 11.98333]$, using the trick of switching end points discussed in the introduction, and $[25, 35.983333]$, both given in degrees), the number of intervals into which the data is split (379 in latitude direction and 219 in longitude direction, for a total of $380\times 220$ data points), and a Table object that contains the data. The data then of course has size $380\times 220$ and we initialize it by providing an iterator to the first of the 83,600 elements of a std::vector object returned by the get_data() function below. Note that all of the member functions we call here are static because (i) they do not access any member variables of the class, and (ii) because they are called at a time when the object is not initialized fully anyway.

      AfricaTopography::AfricaTopography()
      : topography_data({{std::make_pair(-6.983333, 11.966667),
      std::make_pair(25, 35.95)}},
    @@ -415,7 +415,7 @@
      }
     
     
    -

    The following two functions then define the forward and inverse transformations that correspond to the WGS 84 reference shape of Earth. The forward transform follows the formula shown in the introduction. The inverse transform is significantly more complicated and is, at the very least, not intuitive. It also suffers from the fact that it returns an angle that at the end of the function we need to clip back into the interval $[0,2\pi]$ if it should have escaped from there.

    +

    The following two functions then define the forward and inverse transformations that correspond to the WGS 84 reference shape of Earth. The forward transform follows the formula shown in the introduction. The inverse transform is significantly more complicated and is, at the very least, not intuitive. It also suffers from the fact that it returns an angle that at the end of the function we need to clip back into the interval $[0,2\pi]$ if it should have escaped from there.

      Point<3> AfricaGeometry::push_forward_wgs84(const Point<3> &phi_theta_d) const
      {
      const double phi = phi_theta_d[0];
    @@ -485,7 +485,7 @@
     
    SymmetricTensor< 2, dim, Number > d(const Tensor< 2, dim, Number > &F, const Tensor< 2, dim, Number > &dF_dt)

    Creating the mesh

    -

    Having so described the properties of the geometry, not it is time to deal with the mesh used to discretize it. To this end, we create objects for the geometry and triangulation, and then proceed to create a $1\times 2\times 1$ rectangular mesh that corresponds to the reference domain $\hat U=[26,35]\times[-10,5]\times[-500000,0]$. We choose this number of subdivisions because it leads to cells that are roughly like cubes instead of stretched in one direction or another.

    +

    Having so described the properties of the geometry, not it is time to deal with the mesh used to discretize it. To this end, we create objects for the geometry and triangulation, and then proceed to create a $1\times 2\times 1$ rectangular mesh that corresponds to the reference domain $\hat U=[26,35]\times[-10,5]\times[-500000,0]$. We choose this number of subdivisions because it leads to cells that are roughly like cubes instead of stretched in one direction or another.

    Of course, we are not actually interested in meshing the reference domain. We are interested in meshing the real domain. Consequently, we will use the GridTools::transform() function that simply moves every point of a triangulation according to a given transformation. The transformation function it wants is a function that takes as its single argument a point in the reference domain and returns the corresponding location in the domain that we want to map to. This is, of course, exactly the push forward function of the geometry we use. We wrap it by a lambda function to obtain the kind of function object required for the transformation.

      void run()
      {
    @@ -519,8 +519,8 @@
      for (const auto &cell : triangulation.active_cell_iterators())
      cell->set_all_manifold_ids(0);
     
    -

    The last step is to refine the mesh beyond its initial $1\times 2\times
-   1$ coarse mesh. We could just refine globally a number of times, but since for the purpose of this tutorial program we're really only interested in what is happening close to the surface, we just refine 6 times all of the cells that have a face at a boundary with indicator 5. Looking this up in the documentation of the GridGenerator::subdivided_hyper_rectangle() function we have used above reveals that boundary indicator 5 corresponds to the top surface of the domain (and this is what the last true argument in the call to GridGenerator::subdivided_hyper_rectangle() above meant: to "color" the boundaries by assigning each boundary a unique boundary indicator).

    +

    The last step is to refine the mesh beyond its initial $1\times 2\times
+   1$ coarse mesh. We could just refine globally a number of times, but since for the purpose of this tutorial program we're really only interested in what is happening close to the surface, we just refine 6 times all of the cells that have a face at a boundary with indicator 5. Looking this up in the documentation of the GridGenerator::subdivided_hyper_rectangle() function we have used above reveals that boundary indicator 5 corresponds to the top surface of the domain (and this is what the last true argument in the call to GridGenerator::subdivided_hyper_rectangle() above meant: to "color" the boundaries by assigning each boundary a unique boundary indicator).

      for (unsigned int i = 0; i < 6; ++i)
      {
      for (const auto &cell : triangulation.active_cell_iterators())
    @@ -682,9 +682,9 @@

    This all begs two questions: first, does it matter, and second, could this be fixed. Let us discuss these in the following:

    -

    We are solving for a velocity $\textbf{u}$ and pressure $p$ that satisfy the Stokes equation, which reads

    +

    We are solving for a velocity $\textbf{u}$ and pressure $p$ that satisfy the Stokes equation, which reads

    \begin{eqnarray*}
   - \triangle \textbf{u} + \nabla p &=& \textbf{f}, \\
   -\textrm{div}\; \textbf{u} &=& 0.
@@ -149,8 +149,8 @@
 <p><a class=

    Optimal preconditioners

    Make sure that you read (even better: try) what is described in "Block Schur complement preconditioner" in the "Possible Extensions" section in step-22. Like described there, we are going to solve the block system using a Krylov method and a block preconditioner.

    -

    Our goal here is to construct a very simple (maybe the simplest?) optimal preconditioner for the linear system. A preconditioner is called "optimal" or "of optimal complexity", if the number of iterations of the preconditioned system is independent of the mesh size $h$. You can extend that definition to also require indepence of the number of processors used (we will discuss that in the results section), the computational domain and the mesh quality, the test case itself, the polynomial degree of the finite element space, and more.

    -

    Why is a constant number of iterations considered to be "optimal"? Assume the discretized PDE gives a linear system with N unknowns. Because the matrix coming from the FEM discretization is sparse, a matrix-vector product can be done in O(N) time. A preconditioner application can also only be O(N) at best (for example doable with multigrid methods). If the number of iterations required to solve the linear system is independent of $h$ (and therefore N), the total cost of solving the system will be O(N). It is not possible to beat this complexity, because even looking at all the entries of the right-hand side already takes O(N) time. For more information see [elman2005], Chapter 2.5 (Multigrid).

    +

    Our goal here is to construct a very simple (maybe the simplest?) optimal preconditioner for the linear system. A preconditioner is called "optimal" or "of optimal complexity", if the number of iterations of the preconditioned system is independent of the mesh size $h$. You can extend that definition to also require indepence of the number of processors used (we will discuss that in the results section), the computational domain and the mesh quality, the test case itself, the polynomial degree of the finite element space, and more.

    +

    Why is a constant number of iterations considered to be "optimal"? Assume the discretized PDE gives a linear system with N unknowns. Because the matrix coming from the FEM discretization is sparse, a matrix-vector product can be done in O(N) time. A preconditioner application can also only be O(N) at best (for example doable with multigrid methods). If the number of iterations required to solve the linear system is independent of $h$ (and therefore N), the total cost of solving the system will be O(N). It is not possible to beat this complexity, because even looking at all the entries of the right-hand side already takes O(N) time. For more information see [elman2005], Chapter 2.5 (Multigrid).

    The preconditioner described here is even simpler than the one described in step-22 and will typically require more iterations and consequently time to solve. When considering preconditioners, optimality is not the only important metric. But an optimal and expensive preconditioner is typically more desirable than a cheaper, non-optimal one. This is because, eventually, as the mesh size becomes smaller and smaller and linear problems become bigger and bigger, the former will eventually beat the latter.

    The solver and preconditioner

    We precondition the linear system

    @@ -181,14 +181,14 @@ \end{eqnarray*}" src="form_5701.png"/>

    where $S=-BA^{-1} B^T$ is the Schur complement.

    -

    With this choice of $P$, assuming that we handle $A^{-1}$ and $S^{-1}$ exactly (which is an "idealized" situation), the preconditioned linear system has three distinct eigenvalues independent of $h$ and is therefore "optimal". See section 6.2.1 (especially p. 292) in [elman2005]. For comparison, using the ideal version of the upper block-triangular preconditioner in step-22 (also used in step-56) would have all eigenvalues be equal to one.

    -

    We will use approximations of the inverse operations in $P^{-1}$ that are (nearly) independent of $h$. In this situation, one can again show, that the eigenvalues are independent of $h$. For the Krylov method we choose MINRES, which is attractive for the analysis (iteration count is proven to be independent of $h$, see the remainder of the chapter 6.2.1 in [elman2005]), great from the computational standpoint (simpler and cheaper than GMRES for example), and applicable (matrix and preconditioner are symmetric).

    -

    For the approximations we will use a CG solve with the mass matrix in the pressure space for approximating the action of $S^{-1}$. Note that the mass matrix is spectrally equivalent to $S$. We can expect the number of CG iterations to be independent of $h$, even with a simple preconditioner like ILU.

    -

    For the approximation of the velocity block $A$ we will perform a single AMG V-cycle. In practice this choice is not exactly independent of $h$, which can explain the slight increase in iteration numbers. A possible explanation is that the coarsest level will be solved exactly and the number of levels and size of the coarsest matrix is not predictable.

    +

    With this choice of $P$, assuming that we handle $A^{-1}$ and $S^{-1}$ exactly (which is an "idealized" situation), the preconditioned linear system has three distinct eigenvalues independent of $h$ and is therefore "optimal". See section 6.2.1 (especially p. 292) in [elman2005]. For comparison, using the ideal version of the upper block-triangular preconditioner in step-22 (also used in step-56) would have all eigenvalues be equal to one.

    +

    We will use approximations of the inverse operations in $P^{-1}$ that are (nearly) independent of $h$. In this situation, one can again show, that the eigenvalues are independent of $h$. For the Krylov method we choose MINRES, which is attractive for the analysis (iteration count is proven to be independent of $h$, see the remainder of the chapter 6.2.1 in [elman2005]), great from the computational standpoint (simpler and cheaper than GMRES for example), and applicable (matrix and preconditioner are symmetric).

    +

    For the approximations we will use a CG solve with the mass matrix in the pressure space for approximating the action of $S^{-1}$. Note that the mass matrix is spectrally equivalent to $S$. We can expect the number of CG iterations to be independent of $h$, even with a simple preconditioner like ILU.

    +

    For the approximation of the velocity block $A$ we will perform a single AMG V-cycle. In practice this choice is not exactly independent of $h$, which can explain the slight increase in iteration numbers. A possible explanation is that the coarsest level will be solved exactly and the number of levels and size of the coarsest matrix is not predictable.

    The testcase

    We will construct a manufactured solution based on the classical Kovasznay problem, see [kovasznay1948laminar]. Here is an image of the solution colored by the x velocity including streamlines of the velocity:

    -

    We have to cheat here, though, because we are not solving the non-linear Navier-Stokes equations, but the linear Stokes system without convective term. Therefore, to recreate the exact same solution, we use the method of manufactured solutions with the solution of the Kovasznay problem. This will effectively move the convective term into the right-hand side $f$.

    +

    We have to cheat here, though, because we are not solving the non-linear Navier-Stokes equations, but the linear Stokes system without convective term. Therefore, to recreate the exact same solution, we use the method of manufactured solutions with the solution of the Kovasznay problem. This will effectively move the convective term into the right-hand side $f$.

    The right-hand side is computed using the script "reference.py" and we use the exact solution for boundary conditions and error computation.

    The commented program

      #include <deal.II/base/quadrature_lib.h>
    @@ -1026,7 +1026,7 @@
    void interpolate(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const Function< spacedim, typename VectorType::value_type > &function, VectorType &vec, const ComponentMask &component_mask=ComponentMask())

    Results

    -

    As expected from the discussion above, the number of iterations is independent of the number of processors and only very slightly dependent on $h$:

    +

    As expected from the discussion above, the number of iterations is independent of the number of processors and only very slightly dependent on $h$:

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html 2024-03-17 21:57:47.615260065 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html 2024-03-17 21:57:47.615260065 +0000 @@ -141,7 +141,7 @@
    Note
    If you use this program as a basis for your own work, please consider citing it in your list of references. The initial version of this work was contributed to the deal.II project by the authors listed in the following citation: 10.5281/zenodo.400995

    Introduction

    Stokes Problem

    -

    The purpose of this tutorial is to create an efficient linear solver for the Stokes equation and compare it to alternative approaches. Here, we will use FGMRES with geometric multigrid as a preconditioner velocity block, and we will show in the results section that this is a fundamentally better approach than the linear solvers used in step-22 (including the scheme described in "Possible Extensions"). Fundamentally, this is because only with multigrid it is possible to get $O(n)$ solve time, where $n$ is the number of unknowns of the linear system. Using the Timer class, we collect some statistics to compare setup times, solve times, and number of iterations. We also compute errors to make sure that what we have implemented is correct.

    +

    The purpose of this tutorial is to create an efficient linear solver for the Stokes equation and compare it to alternative approaches. Here, we will use FGMRES with geometric multigrid as a preconditioner velocity block, and we will show in the results section that this is a fundamentally better approach than the linear solvers used in step-22 (including the scheme described in "Possible Extensions"). Fundamentally, this is because only with multigrid it is possible to get $O(n)$ solve time, where $n$ is the number of unknowns of the linear system. Using the Timer class, we collect some statistics to compare setup times, solve times, and number of iterations. We also compute errors to make sure that what we have implemented is correct.

    Let $u \in H_0^1 = \{ u \in H^1(\Omega), u|_{\partial \Omega} = 0 \}$ and $p \in L_*^2 = \{ p \in L^2(\Omega), \int_\Omega p = 0
 \}$. The Stokes equations read as follows in non-dimensionalized form:

    @@ -160,7 +160,7 @@ \left(\begin{array}{c} F \\ 0 \end{array}\right). \end{eqnarray*}" src="form_5709.png"/>

    -

    Our goal is to compare several solution approaches. While step-22 solves the linear system using a "Schur complement approach" in two separate steps, we instead attack the block system at once using FMGRES with an efficient preconditioner, in the spirit of the approach outlined in the "Results" section of step-22. The idea is as follows: if we find a block preconditioner $P$ such that the matrix

    +

    Our goal is to compare several solution approaches. While step-22 solves the linear system using a "Schur complement approach" in two separate steps, we instead attack the block system at once using FMGRES with an efficient preconditioner, in the spirit of the approach outlined in the "Results" section of step-22. The idea is as follows: if we find a block preconditioner $P$ such that the matrix

    \begin{eqnarray*}
 \left(\begin{array}{cc} A & B^T \\ B & 0 \end{array}\right) P^{-1}
@@ -173,7 +173,7 @@
  S \end{array}\right)^{-1}
 \end{eqnarray*}

    -

    is a good choice. Let $\widetilde{A^{-1}}$ be an approximation of $A^{-1}$ and $\widetilde{S^{-1}}$ of $S^{-1}$, we see

    +

    is a good choice. Let $\widetilde{A^{-1}}$ be an approximation of $A^{-1}$ and $\widetilde{S^{-1}}$ of $S^{-1}$, we see

    \begin{eqnarray*}
 P^{-1} =
 \left(\begin{array}{cc} A^{-1} & 0 \\ 0 & I \end{array}\right)
@@ -185,9 +185,9 @@
 \left(\begin{array}{cc} I & 0 \\ 0 & \widetilde{S^{-1}} \end{array}\right).
   \end{eqnarray*}

    -

    Since $P$ is aimed to be a preconditioner only, we shall use the approximations on the right in the equation above.

    +

    Since $P$ is aimed to be a preconditioner only, we shall use the approximations on the right in the equation above.

    As discussed in step-22, $-M_p^{-1}=:\widetilde{S^{-1}} \approx
-S^{-1}$, where $M_p$ is the pressure mass matrix and is solved approximately by using CG with ILU as a preconditioner, and $\widetilde{A^{-1}}$ is obtained by one of multiple methods: solving a linear system with CG and ILU as preconditioner, just using one application of an ILU, solving a linear system with CG and GMG (Geometric Multigrid as described in step-16) as a preconditioner, or just performing a single V-cycle of GMG.

    +S^{-1}$" src="form_5716.png"/>, where $M_p$ is the pressure mass matrix and is solved approximately by using CG with ILU as a preconditioner, and $\widetilde{A^{-1}}$ is obtained by one of multiple methods: solving a linear system with CG and ILU as preconditioner, just using one application of an ILU, solving a linear system with CG and GMG (Geometric Multigrid as described in step-16) as a preconditioner, or just performing a single V-cycle of GMG.

    As a comparison, instead of FGMRES, we also use the direct solver UMFPACK on the whole system to compare our results with. If you want to use a direct solver (like UMFPACK), the system needs to be invertible. To avoid the one dimensional null space given by the constant pressures, we fix the first pressure unknown to zero. This is not necessary for the iterative solvers.

    Reference Solution

    The test problem is a "Manufactured Solution" (see step-7 for details), and we choose $u=(u_1,u_2,u_3)=(2\sin (\pi x), - \pi y \cos
@@ -424,7 +424,7 @@
 <div class=  return return_value;

      }
     
    -

    Implementation of $f$. See the introduction for more information.

    +

    Implementation of $f$. See the introduction for more information.

      template <int dim>
      class RightHandSide : public Function<dim>
      {
    @@ -1357,14 +1357,14 @@
      }

    Results

    Errors

    -

    We first run the code and confirm that the finite element solution converges with the correct rates as predicted by the error analysis of mixed finite element problems. Given sufficiently smooth exact solutions $u$ and $p$, the errors of the Taylor-Hood element $Q_k \times Q_{k-1}$ should be

    +

    We first run the code and confirm that the finite element solution converges with the correct rates as predicted by the error analysis of mixed finite element problems. Given sufficiently smooth exact solutions $u$ and $p$, the errors of the Taylor-Hood element $Q_k \times Q_{k-1}$ should be

    \[
 \| u -u_h \|_0 + h ( \| u- u_h\|_1 + \|p - p_h \|_0)
 \leq C h^{k+1} ( \|u \|_{k+1} + \| p \|_k )
 \]

    -

    see for example Ern/Guermond "Theory and Practice of Finite Elements", Section 4.2.5 p195. This is indeed what we observe, using the $Q_2 \times Q_1$ element as an example (this is what is done in the code, but is easily changed in main()):

    +

    see for example Ern/Guermond "Theory and Practice of Finite Elements", Section 4.2.5 p195. This is indeed what we observe, using the $Q_2 \times Q_1$ element as an example (this is what is done in the code, but is easily changed in main()):

    PETSc number of processors
    @@ -1405,7 +1405,7 @@

    The introduction also outlined another option to precondition the overall system, namely one in which we do not choose $\widetilde
 {A^{-1}}=A^{-1}$ as in the table above, but in which $\widetilde{A^{-1}}$ is only a single preconditioner application with GMG or ILU, respectively.

    This is in fact implemented in the code: Currently, the boolean use_expensive in solve() is set to true. The option mentioned above is obtained by setting it to false.

    -

    What you will find is that the number of FGMRES iterations stays constant under refinement if you use GMG this way. This means that the Multigrid is optimal and independent of $h$.

    +

    What you will find is that the number of FGMRES iterations stays constant under refinement if you use GMG this way. This means that the Multigrid is optimal and independent of $h$.

    The plain program

    /* ---------------------------------------------------------------------
    *
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html 2024-03-17 21:57:47.691260535 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html 2024-03-17 21:57:47.687260510 +0000 @@ -147,7 +147,7 @@

    Introduction

    Navier Stokes Equations

    In this tutorial we show how to solve the incompressible Navier Stokes equations (NSE) with Newton's method. The flow we consider here is assumed to be steady. In a domain $\Omega \subset
-\mathbb{R}^{d}$, $d=2,3$, with a piecewise smooth boundary $\partial \Omega$, and a given force field $\textbf{f}$, we seek a velocity field $\textbf{u}$ and a pressure field $\textbf{p}$ satisfying

    +\mathbb{R}^{d}$" src="form_5732.png"/>, $d=2,3$, with a piecewise smooth boundary $\partial \Omega$, and a given force field $\textbf{f}$, we seek a velocity field $\textbf{u}$ and a pressure field $\textbf{p}$ satisfying

    \begin{eqnarray*}
 - \nu \Delta\textbf{u} + (\textbf{u} \cdot \nabla)\textbf{u} + \nabla p &=& \textbf{f}\\
 - \nabla \cdot \textbf{u} &=& 0.
@@ -229,7 +229,7 @@
 <p>Now, Newton's iteration can be used to solve for the update terms:</p>
 <ol>
 <li>
-Initialization: Initial guess <picture><source srcset=$u_0$ and $p_0$, tolerance $\tau$; +Initialization: Initial guess $u_0$ and $p_0$, tolerance $\tau$;

  • Linear solve to compute update term $\delta\textbf{u}^{k}$ and $\delta p^k$;
  • @@ -245,7 +245,7 @@

    Finding an Initial Guess

    The initial guess needs to be close enough to the solution for Newton's method to converge; hence, finding a good starting value is crucial to the nonlinear solver.

    -

    When the viscosity $\nu$ is large, a good initial guess can be obtained by solving the Stokes equation with viscosity $\nu$. While problem dependent, this works for $\nu \geq 1/400$ for the test problem considered here.

    +

    When the viscosity $\nu$ is large, a good initial guess can be obtained by solving the Stokes equation with viscosity $\nu$. While problem dependent, this works for $\nu \geq 1/400$ for the test problem considered here.

    However, the convective term $(\mathbf{u}\cdot\nabla)\mathbf{u}$ will be dominant if the viscosity is small, like $1/7500$ in test case 2. In this situation, we use a continuation method to set up a series of auxiliary NSEs with viscosity approaching the one in the target NSE. Correspondingly, we create a sequence $\{\nu_{i}\}$ with $\nu_{n}= \nu$, and accept that the solutions to two NSE with viscosity $\nu_{i}$ and $\nu_{i+1}$ are close if $|\nu_{i} -
 \nu_{i+1}|$ is small. Then we use the solution to the NSE with viscosity $\nu_{i}$ as the initial guess of the NSE with $\nu_{i+1}$. This can be thought of as a staircase from the Stokes equations to the NSE we want to solve.

    That is, we first solve a Stokes problem

    @@ -309,8 +309,8 @@ \end{pmatrix} \end{eqnarray*}" src="form_5776.png"/>

    -

    with a parameter $\gamma$ and an invertible matrix $W$. Here $\gamma B^TW^{-1}B$ is the Augmented Lagrangian term; see [Benzi2006] for details.

    -

    Denoting the system matrix of the new system by $G$ and the right-hand side by $b$, we solve it iteratively with right preconditioning $P^{-1}$ as $GP^{-1}y = b$, where

    +

    with a parameter $\gamma$ and an invertible matrix $W$. Here $\gamma B^TW^{-1}B$ is the Augmented Lagrangian term; see [Benzi2006] for details.

    +

    Denoting the system matrix of the new system by $G$ and the right-hand side by $b$, we solve it iteratively with right preconditioning $P^{-1}$ as $GP^{-1}y = b$, where

    \begin{eqnarray*}
 P^{-1} =
   \begin{pmatrix}
@@ -319,13 +319,13 @@
   \end{pmatrix}^{-1}
 \end{eqnarray*}

    -

    with $\tilde{A} = A + \gamma B^TW^{-1}B$ and $\tilde{S}$ is the corresponding Schur complement $\tilde{S} = B^T \tilde{A}^{-1} B$. We let $W = M_p$ where $M_p$ is the pressure mass matrix, then $\tilde{S}^{-1}$ can be approximated by

    +

    with $\tilde{A} = A + \gamma B^TW^{-1}B$ and $\tilde{S}$ is the corresponding Schur complement $\tilde{S} = B^T \tilde{A}^{-1} B$. We let $W = M_p$ where $M_p$ is the pressure mass matrix, then $\tilde{S}^{-1}$ can be approximated by

    \begin{eqnarray*}
 \tilde{S}^{-1} \approx -(\nu+\gamma)M_p^{-1}.
 \end{eqnarray*}

    See [Benzi2006] for details.

    -

    We decompose $P^{-1}$ as

    +

    We decompose $P^{-1}$ as

    \begin{eqnarray*}
 P^{-1} =
   \begin{pmatrix}
@@ -342,18 +342,18 @@
   \end{pmatrix}.
 \end{eqnarray*}

    -

    Here two inexact solvers will be needed for $\tilde{A}^{-1}$ and $\tilde{S}^{-1}$, respectively (see [Benzi2006]). Since the pressure mass matrix is symmetric and positive definite, CG with ILU as a preconditioner is appropriate to use for $\tilde{S}^{-1}$. For simplicity, we use the direct solver UMFPACK for $\tilde{A}^{-1}$. The last ingredient is a sparse matrix-vector product with $B^T$. Instead of computing the matrix product in the augmented Lagrangian term in $\tilde{A}$, we assemble Grad-Div stabilization $(\nabla \cdot \phi _{i}, \nabla \cdot \phi _{j}) \approx (B^T
+<p>Here two inexact solvers will be needed for <picture><source srcset=$\tilde{A}^{-1}$ and $\tilde{S}^{-1}$, respectively (see [Benzi2006]). Since the pressure mass matrix is symmetric and positive definite, CG with ILU as a preconditioner is appropriate to use for $\tilde{S}^{-1}$. For simplicity, we use the direct solver UMFPACK for $\tilde{A}^{-1}$. The last ingredient is a sparse matrix-vector product with $B^T$. Instead of computing the matrix product in the augmented Lagrangian term in $\tilde{A}$, we assemble Grad-Div stabilization $(\nabla \cdot \phi _{i}, \nabla \cdot \phi _{j}) \approx (B^T
 M_p^{-1}B)_{ij}$, as explained in [HeisterRapin2013].

    Test Case

    -

    We use the lid driven cavity flow as our test case; see this page for details. The computational domain is the unit square and the right-hand side is $f=0$. The boundary condition is

    +

    We use the lid driven cavity flow as our test case; see this page for details. The computational domain is the unit square and the right-hand side is $f=0$. The boundary condition is

    \begin{eqnarray*}
   (u(x, y), v(x,y)) &=& (1,0) \qquad\qquad \textrm{if}\ y = 1 \\
   (u(x, y), v(x,y)) &=& (0,0) \qquad\qquad \textrm{otherwise}.
 \end{eqnarray*}

    When solving this problem, the error consists of the nonlinear error (from Newton's iteration) and the discretization error (dependent on mesh size). The nonlinear part decreases with each Newton iteration and the discretization error reduces with mesh refinement. In this example, the solution from the coarse mesh is transferred to successively finer meshes and used as an initial guess. Therefore, the nonlinear error is always brought below the tolerance of Newton's iteration and the discretization error is reduced with each mesh refinement.

    -

    Inside the loop, we involve three solvers: one for $\tilde{A}^{-1}$, one for $M_p^{-1}$ and one for $Gx=b$. The first two solvers are invoked in the preconditioner and the outer solver gives us the update term. Overall convergence is controlled by the nonlinear residual; as Newton's method does not require an exact Jacobian, we employ FGMRES with a relative tolerance of only 1e-4 for the outer linear solver. In fact, we use the truncated Newton solve for this system. As described in step-22, the inner linear solves are also not required to be done very accurately. Here we use CG with a relative tolerance of 1e-6 for the pressure mass matrix. As expected, we still see convergence of the nonlinear residual down to 1e-14. Also, we use a simple line search algorithm for globalization of the Newton method.

    -

    The cavity reference values for $\mathrm{Re}=400$ and $\mathrm{Re}=7500$ are from [Ghia1982] and [Erturk2005], respectively, where $\mathrm{Re}$ is the Reynolds number. Here the viscosity is defined by $1/\mathrm{Re}$. Even though we can still find a solution for $\mathrm{Re}=10000$ and the papers cited throughout this introduction contain results for comparison, we limit our discussion here to $\mathrm{Re}=7500$. This is because the solution is no longer stationary starting around $\mathrm{Re}=8000$ but instead becomes periodic, see [Bruneau2006] for details.

    +

    Inside the loop, we involve three solvers: one for $\tilde{A}^{-1}$, one for $M_p^{-1}$ and one for $Gx=b$. The first two solvers are invoked in the preconditioner and the outer solver gives us the update term. Overall convergence is controlled by the nonlinear residual; as Newton's method does not require an exact Jacobian, we employ FGMRES with a relative tolerance of only 1e-4 for the outer linear solver. In fact, we use the truncated Newton solve for this system. As described in step-22, the inner linear solves are also not required to be done very accurately. Here we use CG with a relative tolerance of 1e-6 for the pressure mass matrix. As expected, we still see convergence of the nonlinear residual down to 1e-14. Also, we use a simple line search algorithm for globalization of the Newton method.

    +

    The cavity reference values for $\mathrm{Re}=400$ and $\mathrm{Re}=7500$ are from [Ghia1982] and [Erturk2005], respectively, where $\mathrm{Re}$ is the Reynolds number. Here the viscosity is defined by $1/\mathrm{Re}$. Even though we can still find a solution for $\mathrm{Re}=10000$ and the papers cited throughout this introduction contain results for comparison, we limit our discussion here to $\mathrm{Re}=7500$. This is because the solution is no longer stationary starting around $\mathrm{Re}=8000$ but instead becomes periodic, see [Bruneau2006] for details.

    The commented program

    Include files

    As usual, we start by including some well-known files:

    @@ -802,8 +802,8 @@
     
    DEAL_II_HOST constexpr Number trace(const SymmetricTensor< 2, dim2, Number > &)
  • If we were asked to assemble the Newton matrix, then we also built a pressure mass matrix in the bottom right block of the matrix. We only need this for the preconditioner, so we need to copy it in into a separate matrix object, followed by zeroing out this block in the Newton matrix.

    -

    Note that settings this bottom right block to zero is not identical to not assembling anything in this block, because applying boundary values and hanging node constraints (in the constraints_used.distribute_local_to_global() call above) puts entries into this block. As a consequence, setting the $(1,1)$ block to zero below does not result in what would have happened if we had just not assembled a pressure mass matrix in that block to begin with.

    -

    The difference is that if we had not assembled anything in this block, dealing with constraint degrees of freedom would have put entries on the diagonal of the $(1,1)$ block whereas the last operation below, zeroing out the entire block, results in a system matrix with rows and columns that are completely empty. In other words, the linear problem is singular. Luckily, however, the FGMRES solver we use appears to handle these rows and columns without any problem.

    +

    Note that settings this bottom right block to zero is not identical to not assembling anything in this block, because applying boundary values and hanging node constraints (in the constraints_used.distribute_local_to_global() call above) puts entries into this block. As a consequence, setting the $(1,1)$ block to zero below does not result in what would have happened if we had just not assembled a pressure mass matrix in that block to begin with.

    +

    The difference is that if we had not assembled anything in this block, dealing with constraint degrees of freedom would have put entries on the diagonal of the $(1,1)$ block whereas the last operation below, zeroing out the entire block, results in a system matrix with rows and columns that are completely empty. In other words, the linear problem is singular. Luckily, however, the FGMRES solver we use appears to handle these rows and columns without any problem.

      if (assemble_matrix)
      {
      pressure_mass_matrix.reinit(sparsity_pattern.block(1, 1));
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 2024-03-17 21:57:47.763260979 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 2024-03-17 21:57:47.767261004 +0000 @@ -170,7 +170,7 @@ \psi$" src="form_5811.png"/> has no spatial or temporal derivatives, i.e., it is a purely local operator. It turns out that we have efficient methods for each of these terms (in particular, we have analytic solutions for the latter), and that we may be better off treating these terms differently and separately. We will explain this in more detail below.

    A note about the character of the equations

    -

    At first glance, the equations appear to be parabolic and similar to the heat equation (see step-26) as there is only a single time derivative and two spatial derivatives. But this is misleading. Indeed, that this is not the correct interpretation is more easily seen if we assume for a moment that the potential $V=0$ and $\kappa=0$. Then we have the equation

    +

    At first glance, the equations appear to be parabolic and similar to the heat equation (see step-26) as there is only a single time derivative and two spatial derivatives. But this is misleading. Indeed, that this is not the correct interpretation is more easily seen if we assume for a moment that the potential $V=0$ and $\kappa=0$. Then we have the equation

    \begin{align*}
   - i \frac{\partial \psi}{\partial t}
   - \frac 12 \Delta \psi
@@ -188,7 +188,7 @@
   &= 0.
 \end{align*}

    -

    Not surprisingly, the factor $i$ in front of the time derivative couples the real and imaginary parts of the equation. If we want to understand this equation further, take the time derivative of one of the equations, say

    +

    Not surprisingly, the factor $i$ in front of the time derivative couples the real and imaginary parts of the equation. If we want to understand this equation further, take the time derivative of one of the equations, say

    \begin{align*}
   \frac{\partial^2 w}{\partial t^2}
   - \frac 12 \Delta \frac{\partial v}{\partial t}
@@ -202,9 +202,9 @@
   &= 0.
 \end{align*}

    -

    This equation is hyperbolic and similar in character to the wave equation. (This will also be obvious if you look at the video in the "Results" section of this program.) Furthermore, we could have arrived at the same equation for $v$ as well. Consequently, a better assumption for the NLSE is to think of it as a hyperbolic, wave-propagation equation than as a diffusion equation such as the heat equation. (You may wonder whether it is correct that the operator $\Delta^2$ appears with a positive sign whereas in the wave equation, $\Delta$ has a negative sign. This is indeed correct: After multiplying by a test function and integrating by parts, we want to come out with a positive (semi-)definite form. So, from $-\Delta u$ we obtain $+(\nabla v,\nabla u)$. Likewise, after integrating by parts twice, we obtain from $+\Delta^2 u$ the form $+(\Delta v,\Delta u)$. In both cases do we get the desired positive sign.)

    +

    This equation is hyperbolic and similar in character to the wave equation. (This will also be obvious if you look at the video in the "Results" section of this program.) Furthermore, we could have arrived at the same equation for $v$ as well. Consequently, a better assumption for the NLSE is to think of it as a hyperbolic, wave-propagation equation than as a diffusion equation such as the heat equation. (You may wonder whether it is correct that the operator $\Delta^2$ appears with a positive sign whereas in the wave equation, $\Delta$ has a negative sign. This is indeed correct: After multiplying by a test function and integrating by parts, we want to come out with a positive (semi-)definite form. So, from $-\Delta u$ we obtain $+(\nabla v,\nabla u)$. Likewise, after integrating by parts twice, we obtain from $+\Delta^2 u$ the form $+(\Delta v,\Delta u)$. In both cases do we get the desired positive sign.)

    The real NLSE, of course, also has the terms $V\psi$ and $\kappa|\psi|^2\psi$. However, these are of lower order in the spatial derivatives, and while they are obviously important, they do not change the character of the equation.

    -

    In any case, the purpose of this discussion is to figure out what time stepping scheme might be appropriate for the equation. The conclusions is that, as a hyperbolic-kind of equation, we need to choose a time step that satisfies a CFL-type condition. If we were to use an explicit method (which we will not), we would have to investigate the eigenvalues of the matrix that corresponds to the spatial operator. If you followed the discussions of the video lectures (See also video lecture 26, video lecture 27, video lecture 28.) then you will remember that the pattern is that one needs to make sure that $k^s \propto h^t$ where $k$ is the time step, $h$ the mesh width, and $s,t$ are the orders of temporal and spatial derivatives. Whether you take the original equation ( $s=1,t=2$) or the reformulation for only the real or imaginary part, the outcome is that we would need to choose $k \propto h^2$ if we were to use an explicit time stepping method. This is not feasible for the same reasons as in step-26 for the heat equation: It would yield impractically small time steps for even only modestly refined meshes. Rather, we have to use an implicit time stepping method and can then choose a more balanced $k \propto h$. Indeed, we will use the implicit Crank-Nicolson method as we have already done in step-23 before for the regular wave equation.

    +

    In any case, the purpose of this discussion is to figure out what time stepping scheme might be appropriate for the equation. The conclusions is that, as a hyperbolic-kind of equation, we need to choose a time step that satisfies a CFL-type condition. If we were to use an explicit method (which we will not), we would have to investigate the eigenvalues of the matrix that corresponds to the spatial operator. If you followed the discussions of the video lectures (See also video lecture 26, video lecture 27, video lecture 28.) then you will remember that the pattern is that one needs to make sure that $k^s \propto h^t$ where $k$ is the time step, $h$ the mesh width, and $s,t$ are the orders of temporal and spatial derivatives. Whether you take the original equation ( $s=1,t=2$) or the reformulation for only the real or imaginary part, the outcome is that we would need to choose $k \propto h^2$ if we were to use an explicit time stepping method. This is not feasible for the same reasons as in step-26 for the heat equation: It would yield impractically small time steps for even only modestly refined meshes. Rather, we have to use an implicit time stepping method and can then choose a more balanced $k \propto h$. Indeed, we will use the implicit Crank-Nicolson method as we have already done in step-23 before for the regular wave equation.

    The general idea of operator splitting

    Note
    The material presented here is also discussed in video lecture 30.25. (All video lectures are also available here.)

    If one thought of the NLSE as an ordinary differential equation in which the right hand side happens to have spatial derivatives, i.e., write it as

    @@ -342,7 +342,7 @@ I^{(1)} + I^{(2)} + I^{(3)}. \end{align*}" src="form_5845.png"/>

    -

    This intuition is indeed correct, though the approximation is not exact: the difference between the exact left hand side and the term $I^{(1)}+I^{(2)}+I^{(3)}$ (i.e., the difference between the exact increment for the exact solution $\psi(t)$ when moving from $t_n$ to $t_{n+1}$, and the increment composed of the three parts on the right hand side), is proportional to $\Delta t=t_{n+1}-t_{n}$. In other words, this approach introduces an error of size ${\cal O}(\Delta t)$. Nothing we have done so far has discretized anything in time or space, so the overall error is going to be ${\cal O}(\Delta t)$ plus whatever error we commit when approximating the integrals (the temporal discretization error) plus whatever error we commit when approximating the spatial dependencies of $\psi$ (the spatial error).

    +

    This intuition is indeed correct, though the approximation is not exact: the difference between the exact left hand side and the term $I^{(1)}+I^{(2)}+I^{(3)}$ (i.e., the difference between the exact increment for the exact solution $\psi(t)$ when moving from $t_n$ to $t_{n+1}$, and the increment composed of the three parts on the right hand side), is proportional to $\Delta t=t_{n+1}-t_{n}$. In other words, this approach introduces an error of size ${\cal O}(\Delta t)$. Nothing we have done so far has discretized anything in time or space, so the overall error is going to be ${\cal O}(\Delta t)$ plus whatever error we commit when approximating the integrals (the temporal discretization error) plus whatever error we commit when approximating the spatial dependencies of $\psi$ (the spatial error).

    Before we continue with discussions about operator splitting, let us talk about why one would even want to go this way? The answer is simple: For some of the separate equations for the $\psi^{(k)}$, we may have ways to solve them more efficiently than if we throw everything together and try to solve it at once. For example, and particularly pertinent in the current case: The equation for $\psi^{(3)}$, i.e.,

    \begin{align*}
   \frac{d\psi^{(3)}}{dt}
@@ -474,7 +474,7 @@
 </p>
 <p> (Compare this again with the $\psi(t_{n+1})$: It only differs in how we approximate $\psi(t)$ in each of the three integrals.) In other words, Lie splitting is a lot simpler to implement that the original method outlined above because data handling is so much simpler.

    Operator splitting: the "Strang splitting" approach

    -

    As mentioned above, Lie splitting is only ${\cal O}(\Delta t)$ accurate. This is acceptable if we were to use a first order time discretization, for example using the explicit or implicit Euler methods to solve the differential equations for $\psi^{(k)}$. This is because these time integration methods introduce an error proportional to $\Delta t$ themselves, and so the splitting error is proportional to an error that we would introduce anyway, and does not diminish the overall convergence order.

    +

    As mentioned above, Lie splitting is only ${\cal O}(\Delta t)$ accurate. This is acceptable if we were to use a first order time discretization, for example using the explicit or implicit Euler methods to solve the differential equations for $\psi^{(k)}$. This is because these time integration methods introduce an error proportional to $\Delta t$ themselves, and so the splitting error is proportional to an error that we would introduce anyway, and does not diminish the overall convergence order.

    But we typically want to use something higher order – say, a Crank-Nicolson or BDF2 method – since these are often not more expensive than a simple Euler method. It would be a shame if we were to use a time stepping method that is ${\cal O}(\Delta t^2)$, but then lose the accuracy again through the operator splitting.

    This is where the Strang splitting method comes in. It is easier to explain if we had only two parts, and so let us combine the effects of the Laplace operator and of the potential into one, and the phase rotation into a second effect. (Indeed, this is what we will do in the code since solving the equation with the Laplace equation with or without the potential costs the same – so we merge these two steps.) The Lie splitting method from above would then do the following: It computes solutions of the following two ODEs,

    \begin{align*}
@@ -564,9 +564,9 @@
 \end{align*}

    Here, the "previous" solution $\psi^{(n,1)}$ (or the "initial -condition" for this part of the time step) is the output of the first phase rotation half-step; the output of the current step will be denoted by $\psi^{(n,2)}$. $k_{n+1}=t_{n+1}-t_n$ is the length of the time step. (One could argue whether $\psi^{(n,1)}$ and $\psi^{(n,1)}$ live at time step $n$ or $n+1$ and what their upper indices should be. This is a philosophical discussion without practical impact, and one might think of $\psi^{(n,1)}$ as something like $\psi^{(n+\tfrac 13)}$, and $\psi^{(n,2)}$ as $\psi^{(n+\tfrac 23)}$ if that helps clarify things – though, again $n+\frac 13$ is not to be understood as "one third time step after +condition" for this part of the time step) is the output of the first phase rotation half-step; the output of the current step will be denoted by $\psi^{(n,2)}$. $k_{n+1}=t_{n+1}-t_n$ is the length of the time step. (One could argue whether $\psi^{(n,1)}$ and $\psi^{(n,1)}$ live at time step $n$ or $n+1$ and what their upper indices should be. This is a philosophical discussion without practical impact, and one might think of $\psi^{(n,1)}$ as something like $\psi^{(n+\tfrac 13)}$, and $\psi^{(n,2)}$ as $\psi^{(n+\tfrac 23)}$ if that helps clarify things – though, again $n+\frac 13$ is not to be understood as "one third time step after \_form#href_anchor" but more like "we've already done one third of the work necessary -for time step \_form#3044".)

    +for time step \_form#2972".)

    If we multiply the whole equation with $k_{n+1}$ and sort terms with the unknown $\psi^{(n+1,2)}$ to the left and those with the known $\psi^{(n,2)}$ to the right, then we obtain the following (spatial) partial differential equation that needs to be solved in each time step:

    \begin{align*}
   -i\psi^{(n,2)}
@@ -585,7 +585,7 @@
 <p><a class=

    Spatial discretization and dealing with complex variables

    As mentioned above, the previous tutorial program dealing with complex-valued solutions (namely, step-29) separated real and imaginary parts of the solution. It thus reduced everything to real arithmetic. In contrast, we here want to keep things complex-valued.

    The first part of this is that we need to define the discretized solution as $\psi_h^n(\mathbf x)=\sum_j \Psi^n_j \varphi_j(\mathbf
-x) \approx \psi(\mathbf x,t_n)$ where the $\varphi_j$ are the usual shape functions (which are real valued) but the expansion coefficients $\Psi^n_j$ at time step $n$ are now complex-valued. This is easily done in deal.II: We just have to use Vector<std::complex<double>> instead of Vector<double> to store these coefficients.

    +x) \approx \psi(\mathbf x,t_n)$" src="form_5890.png"/> where the $\varphi_j$ are the usual shape functions (which are real valued) but the expansion coefficients $\Psi^n_j$ at time step $n$ are now complex-valued. This is easily done in deal.II: We just have to use Vector<std::complex<double>> instead of Vector<double> to store these coefficients.

    Of more interest is how to build and solve the linear system. Obviously, this will only be necessary for the second step of the Strang splitting discussed above, with the time discretization of the previous subsection. We obtain the fully discrete version through straightforward substitution of $\psi^n$ by $\psi^n_h$ and multiplication by a test function:

    \begin{align*}
   -iM\Psi^{(n,2)}
@@ -663,7 +663,7 @@
   \int_\Omega \alpha_k e^{-\frac{r_k^2}{R^2}}
 \]

    -

    is a positive integer. In other words, we need to choose $\alpha$ as an integer multiple of

    +

    is a positive integer. In other words, we need to choose $\alpha$ as an integer multiple of

    \[
   \left(\int_\Omega e^{-\frac{r_k^2}{R^2}}\right)^{-1}
   =
@@ -671,15 +671,15 @@
 \]

    assuming for the moment that $\Omega={\mathbb R}^d$ – which is of course not the case, but we'll ignore the small difference in integral.

    -

    Thus, we choose $\alpha_k=\left(R^d\sqrt{\pi^d}\right)^{-1}$ for all, and $R=0.1$. This $R$ is small enough that the difference between the exact (infinite) integral and the integral over $\Omega$ should not be too concerning. We choose the four points $\mathbf x_k$ as $(\pm 0.3, 0), (0, \pm
-0.3)$ – also far enough away from the boundary of $\Omega$ to keep ourselves on the safe side.

    -

    For simplicity, we pose the problem on the square $[-1,1]^2$. For boundary conditions, we will use time-independent Neumann conditions of the form

    +

    Thus, we choose $\alpha_k=\left(R^d\sqrt{\pi^d}\right)^{-1}$ for all, and $R=0.1$. This $R$ is small enough that the difference between the exact (infinite) integral and the integral over $\Omega$ should not be too concerning. We choose the four points $\mathbf x_k$ as $(\pm 0.3, 0), (0, \pm
+0.3)$ – also far enough away from the boundary of $\Omega$ to keep ourselves on the safe side.

    +

    For simplicity, we pose the problem on the square $[-1,1]^2$. For boundary conditions, we will use time-independent Neumann conditions of the form

    \[
   \nabla\psi(\mathbf x,t)\cdot \mathbf n=0 \qquad\qquad \forall \mathbf x\in\partial\Omega.
 \]

    This is not a realistic choice of boundary conditions but sufficient for what we want to demonstrate here. We will comment further on this in the Possibilities for extensions section below.

    -

    Finally, we choose $\kappa=1$, and the potential as

    +

    Finally, we choose $\kappa=1$, and the potential as

    \[
   V(\mathbf x)
   =
@@ -689,7 +689,7 @@
   \end{cases}
 \]

    -

    Using a large potential makes sure that the wave function $\psi$ remains small outside the circle of radius 0.7. All of the Gaussians that make up the initial conditions are within this circle, and the solution will mostly oscillate within it, with a small amount of energy radiating into the outside. The use of a large potential also makes sure that the nonphysical boundary condition does not have too large an effect.

    +

    Using a large potential makes sure that the wave function $\psi$ remains small outside the circle of radius 0.7. All of the Gaussians that make up the initial conditions are within this circle, and the solution will mostly oscillate within it, with a small amount of energy radiating into the outside. The use of a large potential also makes sure that the nonphysical boundary condition does not have too large an effect.

    The commented program

    Include files

    The program starts with the usual include files, all of which you should have seen before by now:

    @@ -858,7 +858,7 @@
    ::VectorizedArray< Number, width > sqrt(const ::VectorizedArray< Number, width > &)
    ::VectorizedArray< Number, width > pow(const ::VectorizedArray< Number, width > &, const Number p)

    Implementation of the NonlinearSchroedingerEquation class

    -

    We start by specifying the implementation of the constructor of the class. There is nothing of surprise to see here except perhaps that we choose quadratic ( $Q_2$) Lagrange elements – the solution is expected to be smooth, so we choose a higher polynomial degree than the bare minimum.

    +

    We start by specifying the implementation of the constructor of the class. There is nothing of surprise to see here except perhaps that we choose quadratic ( $Q_2$) Lagrange elements – the solution is expected to be smooth, so we choose a higher polynomial degree than the bare minimum.

      template <int dim>
      NonlinearSchroedingerEquation<dim>::NonlinearSchroedingerEquation()
      : fe(2)
    @@ -1016,7 +1016,7 @@
     
     
    ::VectorizedArray< Number, width > abs(const ::VectorizedArray< Number, width > &)
    -

    The next step is to solve for the linear system in each time step, i.e., the second half step of the Strang splitting we use. Recall that it had the form $C\Psi^{(n,2)} = R\Psi^{(n,1)}$ where $C$ and $R$ are the matrices we assembled earlier.

    +

    The next step is to solve for the linear system in each time step, i.e., the second half step of the Strang splitting we use. Recall that it had the form $C\Psi^{(n,2)} = R\Psi^{(n,1)}$ where $C$ and $R$ are the matrices we assembled earlier.

    The way we solve this here is using a direct solver. We first form the right hand side $r=R\Psi^{(n,1)}$ using the SparseMatrix::vmult() function and put the result into the system_rhs variable. We then call SparseDirectUMFPACK::solver() which takes as argument the matrix $C$ and the right hand side vector and returns the solution in the same vector system_rhs. The final step is then to put the solution so computed back into the solution variable.

      template <int dim>
      void NonlinearSchroedingerEquation<dim>::do_full_spatial_step()
    @@ -1263,7 +1263,7 @@ allowfullscreen>

    -

    So why did I end up shading the area where the potential $V(\mathbf x)$ is large? In that outside region, the solution is relatively small. It is also relatively smooth. As a consequence, to some approximate degree, the equation in that region simplifies to

    +

    So why did I end up shading the area where the potential $V(\mathbf x)$ is large? In that outside region, the solution is relatively small. It is also relatively smooth. As a consequence, to some approximate degree, the equation in that region simplifies to

    \[
   - i \frac{\partial \psi}{\partial t}
   + V \psi
@@ -1286,9 +1286,9 @@
 <p><a class=

    Better linear solvers

    The solver chosen here is just too simple. It is also not efficient. What we do here is give the matrix to a sparse direct solver in every time step and let it find the solution of the linear system. But we know that we could do far better:

    Boundary conditions

    In order to be usable for actual, realistic problems, solvers for the nonlinear Schrödinger equation need to utilize boundary conditions that make sense for the problem at hand. We have here restricted ourselves to simple Neumann boundary conditions – but these do not actually make sense for the problem. Indeed, the equations are generally posed on an infinite domain. But, since we can't compute on infinite domains, we need to truncate it somewhere and instead pose boundary conditions that make sense for this artificially small domain. The approach widely used is to use the Perfectly Matched Layer method that corresponds to a particular kind of attenuation. It is, in a different context, also used in step-62.

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html 2024-03-17 21:57:47.839261449 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html 2024-03-17 21:57:47.839261449 +0000 @@ -136,12 +136,12 @@ \end{align*}" src="form_5958.png"/>

    where $\jump{v} = v^- \mathbf{n}^- + v^+ \mathbf{n}^+ = \mathbf n^{-}
-\left(v^- - v^+\right)$ denotes the directed jump of the quantity $v$ from the two associated cells $K^-$ and $K^+$, and $\average{v}=\frac{v^- + v^+}{2}$ is the average from both sides.

    -

    The terms in the equation represent the cell integral after integration by parts, the primal consistency term that arises at the element interfaces due to integration by parts and insertion of an average flux, the adjoint consistency term that is added for restoring symmetry of the underlying matrix, and a penalty term with factor $\sigma$, whose magnitude is equal the length of the cells in direction normal to face multiplied by $k(k+1)$, see step-39. The penalty term is chosen such that an inverse estimate holds and the final weak form is coercive, i.e., positive definite in the discrete setting. The adjoint consistency term and the penalty term involve the jump $\jump{u_h}$ at the element interfaces, which disappears for the analytic solution $u$. Thus, these terms are consistent with the original PDE, ensuring that the method can retain optimal orders of convergence.

    +\left(v^- - v^+\right)$" src="form_5959.png"/> denotes the directed jump of the quantity $v$ from the two associated cells $K^-$ and $K^+$, and $\average{v}=\frac{v^- + v^+}{2}$ is the average from both sides.

    +

    The terms in the equation represent the cell integral after integration by parts, the primal consistency term that arises at the element interfaces due to integration by parts and insertion of an average flux, the adjoint consistency term that is added for restoring symmetry of the underlying matrix, and a penalty term with factor $\sigma$, whose magnitude is equal the length of the cells in direction normal to face multiplied by $k(k+1)$, see step-39. The penalty term is chosen such that an inverse estimate holds and the final weak form is coercive, i.e., positive definite in the discrete setting. The adjoint consistency term and the penalty term involve the jump $\jump{u_h}$ at the element interfaces, which disappears for the analytic solution $u$. Thus, these terms are consistent with the original PDE, ensuring that the method can retain optimal orders of convergence.

    In the implementation below, we implement the weak form above by moving the normal vector $\mathbf{n}^-$ from the jump terms to the derivatives to form a normal derivative of the form $\mathbf{n}^-\cdot \nabla u_h$. This makes the implementation on quadrature points slightly more efficient because we only need to work with scalar terms rather than tensors, and is mathematically equivalent.

    -

    For boundary conditions, we use the so-called mirror principle that defines artificial exterior values $u^+$ by extrapolation from the interior solution $u^-$ combined with the given boundary data, setting $u^+ = -u^- + 2
+<p>For boundary conditions, we use the so-called mirror principle that defines <em>artificial</em> exterior values <picture><source srcset=$u^+$ by extrapolation from the interior solution $u^-$ combined with the given boundary data, setting $u^+ = -u^- + 2
 g_\text{D}$ and $\mathbf{n}^-\cdot \nabla u^+ = \mathbf{n}^-\cdot \nabla u^-$ on Dirichlet boundaries and $u^+=u^-$ and $\mathbf{n}^-\cdot \nabla u^+ =
--\mathbf{n}^-\cdot \nabla u^- + 2 g_\text{N}$ on Neumann boundaries, for given Dirichlet values $g_\text{D}$ and Neumann values $g_\text{N}$. These expressions are then inserted in the above weak form. Contributions involving the known quantities $g_\text{D}$ and $g_\text{N}$ are eventually moved to the right hand side, whereas the unknown value $u^-$ is retained on the left hand side and contributes to the matrix terms similarly as interior faces. Upon these manipulations, the same weak form as in step-39 is obtained.

    +-\mathbf{n}^-\cdot \nabla u^- + 2 g_\text{N}$" src="form_5970.png"/> on Neumann boundaries, for given Dirichlet values $g_\text{D}$ and Neumann values $g_\text{N}$. These expressions are then inserted in the above weak form. Contributions involving the known quantities $g_\text{D}$ and $g_\text{N}$ are eventually moved to the right hand side, whereas the unknown value $u^-$ is retained on the left hand side and contributes to the matrix terms similarly as interior faces. Upon these manipulations, the same weak form as in step-39 is obtained.

    Face integration support in MatrixFree and FEFaceEvaluation

    The matrix-free framework of deal.II provides the necessary infrastructure to implement the action of the discretized equation above. As opposed to the MatrixFree::cell_loop() that we used in step-37 and step-48, we now build a code in terms of MatrixFree::loop() that takes three function pointers, one for the cell integrals, one for the inner face integrals, and one for the boundary face integrals (in analogy to the design of MeshWorker used in the step-39 tutorial program). In each of these three functions, we then implement the respective terms on the quadrature points. For interpolation between the vector entries and the values and gradients on quadrature points, we use the class FEEvaluation for cell contributions and FEFaceEvaluation for face contributions. The basic usage of these functions has been discussed extensively in the step-37 tutorial program.

    In MatrixFree::loop(), all interior faces are visited exactly once, so one must make sure to compute the contributions from both the test functions $v_h^-$ and $v_h^+$. Given the fact that the test functions on both sides are indeed independent, the weak form above effectively means that we submit the same contribution to both an FEFaceEvaluation object called phi_inner and phi_outer for testing with the normal derivative of the test function, and values with opposite sign for testing with the values of the test function, because the latter involves opposite signs due to the jump term. For faces between cells of different refinement level, the integration is done from the refined side, and FEFaceEvaluation automatically performs interpolation to a subface on the coarse side. Thus, a hanging node never appears explicitly in a user implementation of a weak form.

    @@ -154,7 +154,7 @@

    This optimization is not only useful for computing the face integrals, but also for the MPI ghost layer exchange: In a naive exchange, we would need to send all degrees of freedom of a cell to another processor if the other processor is responsible for computing the face's contribution. Since we know that only some of the degrees of freedom in the evaluation with FEFaceEvaluation are touched, it is natural to only exchange the relevant ones. The MatrixFree::loop() function has support for a selected data exchange when combined with LinearAlgebra::distributed::Vector. To make this happen, we need to tell the loop what kind of evaluation on faces we are going to do, using an argument of type MatrixFree::DataAccessOnFaces, as can be seen in the implementation of LaplaceOperator::vmult() below. The way data is exchanged in that case is as follows: The ghost layer data in the vector still pretends to represent all degrees of freedom, such that FEFaceEvaluation can continue to read the values as if the cell were a locally owned one. The data exchange routines take care of the task for packing and unpacking the data into this format. While this sounds pretty complicated, we will show in the results section below that this really pays off by comparing the performance to a baseline code that does not specify the data access on faces.

    An approximate block-Jacobi smoother using the fast diagonalization method

    In the tradition of the step-37 program, we again solve a Poisson problem with a geometric multigrid preconditioner inside a conjugate gradient solver. Instead of computing the diagonal and use the basic PreconditionChebyshev as a smoother, we choose a different strategy in this tutorial program. We implement a block-Jacobi preconditioner, where a block refers to all degrees of freedom on a cell. Rather than building the full cell matrix and applying its LU factorization (or inverse) in the preconditioner — an operation that would be heavily memory bandwidth bound and thus pretty slow — we approximate the inverse of the block by a special technique called fast diagonalization method.

    -

    The idea of the method is to take use of the structure of the cell matrix. In case of the Laplacian with constant coefficients discretized on a Cartesian mesh, the cell matrix $L$ can be written as

    +

    The idea of the method is to take use of the structure of the cell matrix. In case of the Laplacian with constant coefficients discretized on a Cartesian mesh, the cell matrix $L$ can be written as

    \begin{align*}
 L &= A_1 \otimes M_0 + M_1 \otimes A_0
 \end{align*} @@ -165,7 +165,7 @@ \end{align*}" src="form_5977.png"/>

    in 3D. The matrices $A_0$ and $A_1$ denote the 1D Laplace matrix (including the cell and face term associated to the current cell values $u^-_h$ and $v^-_h$) and $M_0$ and $M_1$ are the mass matrices. Note that this simple tensor product structure is lost once there are non-constant coefficients on the cell or the geometry is not constant any more. We mention that a similar setup could also be used to replace the computed integrals with this final tensor product form of the matrices, which would cut the operations for the operator evaluation into less than half. However, given the fact that this only holds for Cartesian cells and constant coefficients, which is a pretty narrow case, we refrain from pursuing this idea.

    -

    Interestingly, the exact inverse of the matrix $L$ can be found through tensor products due to a method introduced by Lynch et al. [Lynch1964] in 1964,

    +

    Interestingly, the exact inverse of the matrix $L$ can be found through tensor products due to a method introduced by Lynch et al. [Lynch1964] in 1964,

    \begin{align*}
 L^{-1} &= S_1 \otimes S_0 (\Lambda_1 \otimes I + I \otimes \Lambda_0)^{-1}
 S_1^\mathrm T \otimes S_0^\mathrm T,
@@ -176,7 +176,7 @@
 A_d s  &= \lambda M_d s, \quad d = 0, \ldots,\mathrm{dim-1},
 \end{align*}

    -

    and $\Lambda_d$ is the diagonal matrix representing the generalized eigenvalues $\lambda$. Note that the vectors $s$ are such that they simultaneously diagonalize $A_d$ and $M_d$, i.e. $S_d^{\mathrm T} A_d S_d =
+<p> and <picture><source srcset=$\Lambda_d$ is the diagonal matrix representing the generalized eigenvalues $\lambda$. Note that the vectors $s$ are such that they simultaneously diagonalize $A_d$ and $M_d$, i.e. $S_d^{\mathrm T} A_d S_d =
 \Lambda_d$ and $S_d^{\mathrm T} M_d S_d = I$.

    The deal.II library implements a class using this concept, called TensorProductMatrixSymmetricSum.

    For the sake of this program, we stick with constant coefficients and Cartesian meshes, even though an approximate version based on tensor products would still be possible for a more general mesh, and the operator evaluation itself is of course generic. Also, we do not bother with adaptive meshes where the multigrid algorithm would need to get access to flux matrices over the edges of different refinement, as explained in step-39. One thing we do, however, is to still wrap our block-Jacobi preconditioner inside PreconditionChebyshev. That class relieves us from finding an appropriate relaxation parameter (which would be around 0.7 in 2D and 0.5 in 3D for the block-Jacobi smoother), and often increases smoothing efficiency somewhat over plain Jacobi smoothing, especially when using several iterations.

    @@ -229,7 +229,7 @@
      const unsigned int dimension = 3;
     

    Equation data

    -

    In analogy to step-7, we define an analytic solution that we try to reproduce with our discretization. Since the aim of this tutorial is to show matrix-free methods, we choose one of the simplest possibilities, namely a cosine function whose derivatives are simple enough for us to compute analytically. Further down, the wave number 2.4 we select here will be matched with the domain extent in $x$-direction that is 2.5, such that we obtain a periodic solution at $x = 2.5$ including $6pi$ or three full wave revolutions in the cosine. The first function defines the solution and its gradient for expressing the analytic solution for the Dirichlet and Neumann boundary conditions, respectively. Furthermore, a class representing the negative Laplacian of the solution is used to represent the right hand side (forcing) function that we use to match the given analytic solution in the discretized version (manufactured solution).

    +

    In analogy to step-7, we define an analytic solution that we try to reproduce with our discretization. Since the aim of this tutorial is to show matrix-free methods, we choose one of the simplest possibilities, namely a cosine function whose derivatives are simple enough for us to compute analytically. Further down, the wave number 2.4 we select here will be matched with the domain extent in $x$-direction that is 2.5, such that we obtain a periodic solution at $x = 2.5$ including $6pi$ or three full wave revolutions in the cosine. The first function defines the solution and its gradient for expressing the analytic solution for the Dirichlet and Neumann boundary conditions, respectively. Furthermore, a class representing the negative Laplacian of the solution is used to represent the right hand side (forcing) function that we use to match the given analytic solution in the discretized version (manufactured solution).

      template <int dim>
      class Solution : public Function<dim>
      {
    @@ -584,11 +584,11 @@
     
     
     
    -

    The boundary face function follows by and large the interior face function. The only difference is the fact that we do not have a separate FEFaceEvaluation object that provides us with exterior values $u^+$, but we must define them from the boundary conditions and interior values $u^-$. As explained in the introduction, we use $u^+ = -u^- + 2
+</div><!-- fragment --><p>The boundary face function follows by and large the interior face function. The only difference is the fact that we do not have a separate <a class=FEFaceEvaluation object that provides us with exterior values $u^+$, but we must define them from the boundary conditions and interior values $u^-$. As explained in the introduction, we use $u^+ = -u^- + 2
    g_\text{D}$ and $\mathbf{n}^-\cdot \nabla u^+ = \mathbf{n}^-\cdot \nabla
    u^-$ on Dirichlet boundaries and $u^+=u^-$ and $\mathbf{n}^-\cdot \nabla
-   u^+ = -\mathbf{n}^-\cdot \nabla u^- + 2 g_\text{N}$ on Neumann boundaries. Since this operation implements the homogeneous part, i.e., the matrix-vector product, we must neglect the boundary functions $g_\text{D}$ and $g_\text{N}$ here, and added them to the right hand side in LaplaceProblem::compute_rhs(). Note that due to extension of the solution $u^-$ to the exterior via $u^+$, we can keep all factors $0.5$ the same as in the inner face function, see also the discussion in step-39.

    -

    There is one catch at this point: The implementation below uses a boolean variable is_dirichlet to switch between the Dirichlet and the Neumann cases. However, we solve a problem where we also want to impose periodic boundary conditions on some boundaries, namely along those in the $x$ direction. One might wonder how those conditions should be handled here. The answer is that MatrixFree automatically treats periodic boundaries as what they are technically, namely an inner face where the solution values of two adjacent cells meet and must be treated by proper numerical fluxes. Thus, all the faces on the periodic boundaries will appear in the apply_face() function and not in this one.

    + u^+ = -\mathbf{n}^-\cdot \nabla u^- + 2 g_\text{N}$" src="form_5992.png"/> on Neumann boundaries. Since this operation implements the homogeneous part, i.e., the matrix-vector product, we must neglect the boundary functions $g_\text{D}$ and $g_\text{N}$ here, and added them to the right hand side in LaplaceProblem::compute_rhs(). Note that due to extension of the solution $u^-$ to the exterior via $u^+$, we can keep all factors $0.5$ the same as in the inner face function, see also the discussion in step-39.

    +

    There is one catch at this point: The implementation below uses a boolean variable is_dirichlet to switch between the Dirichlet and the Neumann cases. However, we solve a problem where we also want to impose periodic boundary conditions on some boundaries, namely along those in the $x$ direction. One might wonder how those conditions should be handled here. The answer is that MatrixFree automatically treats periodic boundaries as what they are technically, namely an inner face where the solution values of two adjacent cells meet and must be treated by proper numerical fluxes. Thus, all the faces on the periodic boundaries will appear in the apply_face() function and not in this one.

      template <int dim, int fe_degree, typename number>
      void LaplaceOperator<dim, fe_degree, number>::apply_boundary(
      const MatrixFree<dim, number> & data,
    @@ -699,7 +699,7 @@
      }
     

    Next, we go through the cells and pass the scaled matrices to TensorProductMatrixSymmetricSum to actually compute the generalized eigenvalue problem for representing the inverse: Since the matrix approximation is constructed as $A\otimes M + M\otimes A$ and the weights are constant for each element, we can apply all weights on the Laplace matrix and simply keep the mass matrices unscaled. In the loop over cells, we want to make use of the geometry compression provided by the MatrixFree class and check if the current geometry is the same as on the last cell batch, in which case there is nothing to do. This compression can be accessed by FEEvaluation::get_mapping_data_index_offset() once reinit() has been called.

    -

    Once we have accessed the inverse Jacobian through the FEEvaluation access function (we take the one for the zeroth quadrature point as they should be the same on all quadrature points for a Cartesian cell), we check that it is diagonal and then extract the determinant of the original Jacobian, i.e., the inverse of the determinant of the inverse Jacobian, and set the weight as $\text{det}(J) / h_d^2$ according to the 1d Laplacian times $d-1$ copies of the mass matrix.

    +

    Once we have accessed the inverse Jacobian through the FEEvaluation access function (we take the one for the zeroth quadrature point as they should be the same on all quadrature points for a Cartesian cell), we check that it is diagonal and then extract the determinant of the original Jacobian, i.e., the inverse of the determinant of the inverse Jacobian, and set the weight as $\text{det}(J) / h_d^2$ according to the 1d Laplacian times $d-1$ copies of the mass matrix.

      cell_matrices.clear();
      unsigned int old_mapping_data_index = numbers::invalid_unsigned_int;
    @@ -1138,7 +1138,7 @@
    void integrate_difference(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const InVector &fe_function, const Function< spacedim, typename InVector::value_type > &exact_solution, OutVector &difference, const Quadrature< dim > &q, const NormType &norm, const Function< spacedim, double > *weight=nullptr, const double exponent=2.)
    ::VectorizedArray< Number, width > sqrt(const ::VectorizedArray< Number, width > &)
    -

    The run() function sets up the initial grid and then runs the multigrid program in the usual way. As a domain, we choose a rectangle with periodic boundary conditions in the $x$-direction, a Dirichlet condition on the front face in $y$ direction (i.e., the face with index number 2, with boundary id equal to 0), and Neumann conditions on the back face as well as the two faces in $z$ direction for the 3d case (with boundary id equal to 1). The extent of the domain is a bit different in the $x$ direction (where we want to achieve a periodic solution given the definition of Solution) as compared to the $y$ and $z$ directions.

    +

    The run() function sets up the initial grid and then runs the multigrid program in the usual way. As a domain, we choose a rectangle with periodic boundary conditions in the $x$-direction, a Dirichlet condition on the front face in $y$ direction (i.e., the face with index number 2, with boundary id equal to 0), and Neumann conditions on the back face as well as the two faces in $z$ direction for the 3d case (with boundary id equal to 1). The extent of the domain is a bit different in the $x$ direction (where we want to achieve a periodic solution given the definition of Solution) as compared to the $y$ and $z$ directions.

      template <int dim, int fe_degree>
      void LaplaceProblem<dim, fe_degree>::run()
      {
    @@ -1334,7 +1334,7 @@
      L2 Velocity Reduction L2 Pressure Reduction H1 Velocity Reduction
    MDoFs/s 2.94 3.29 3.62 3.72 3.47 3.41 2.93 2.88 2.57 2.27 2.01 1.87
    -

    We clearly see how the efficiency per DoF initially improves until it reaches a maximum for the polynomial degree $k=4$. This effect is surprising, not only because higher polynomial degrees often yield a vastly better solution, but especially also when having matrix-based schemes in mind where the denser coupling at higher degree leads to a monotonously decreasing throughput (and a drastic one in 3D, with $k=4$ being more than ten times slower than $k=1$!). For higher degrees, the throughput decreases a bit, which is both due to an increase in the number of iterations (going from 12 at $k=2,3,4$ to 19 at $k=10$) and due to the $\mathcal O(k)$ complexity of operator evaluation. Nonetheless, efficiency as the time to solution would be still better for higher polynomial degrees because they have better convergence rates (at least for problems as simple as this one): For $k=12$, we reach roundoff accuracy already with 1 million DoFs (solver time less than a second), whereas for $k=8$ we need 24 million DoFs and 8 seconds. For $k=5$, the error is around $10^{-9}$ with 57m DoFs and thus still far away from roundoff, despite taking 16 seconds.

    +

    We clearly see how the efficiency per DoF initially improves until it reaches a maximum for the polynomial degree $k=4$. This effect is surprising, not only because higher polynomial degrees often yield a vastly better solution, but especially also when having matrix-based schemes in mind where the denser coupling at higher degree leads to a monotonously decreasing throughput (and a drastic one in 3D, with $k=4$ being more than ten times slower than $k=1$!). For higher degrees, the throughput decreases a bit, which is both due to an increase in the number of iterations (going from 12 at $k=2,3,4$ to 19 at $k=10$) and due to the $\mathcal O(k)$ complexity of operator evaluation. Nonetheless, efficiency as the time to solution would be still better for higher polynomial degrees because they have better convergence rates (at least for problems as simple as this one): For $k=12$, we reach roundoff accuracy already with 1 million DoFs (solver time less than a second), whereas for $k=8$ we need 24 million DoFs and 8 seconds. For $k=5$, the error is around $10^{-9}$ with 57m DoFs and thus still far away from roundoff, despite taking 16 seconds.

    Note that the above numbers are a bit pessimistic because they include the time it takes the Chebyshev smoother to compute an eigenvalue estimate, which is around 10 percent of the solver time. If the system is solved several times (as e.g. common in fluid dynamics), this eigenvalue cost is only paid once and faster times become available.

    Evaluation of efficiency of ingredients

    Finally, we take a look at some of the special ingredients presented in this tutorial program, namely the FE_DGQHermite basis in particular and the specification of MatrixFree::DataAccessOnFaces. In the following table, the third row shows the optimized solver above, the fourth row shows the timings with only the MatrixFree::DataAccessOnFaces set to unspecified rather than the optimal gradients, and the last one with replacing FE_DGQHermite by the basic FE_DGQ elements where both the MPI exchange are more expensive and the operations done by FEFaceEvaluation::gather_evaluate() and FEFaceEvaluation::integrate_scatter().

    @@ -1351,7 +1351,7 @@ Solver time FE_DGQ [s] 0.712 2.041 5.066 9.335 2.379 3.802 6.564 9.714 14.54 22.76 4.148 5.857

    The data in the table shows that not using MatrixFree::DataAccessOnFaces increases costs by around 10% for higher polynomial degrees. For lower degrees, the difference is obviously less pronounced because the volume-to-surface ratio is more beneficial and less data needs to be exchanged. The difference is larger when looking at the matrix-vector product only, rather than the full multigrid solver shown here, with around 20% worse timings just because of the MPI communication.

    -

    For $k=1$ and $k=2$, the Hermite-like basis functions do obviously not really pay off (indeed, for $k=1$ the polynomials are exactly the same as for FE_DGQ) and the results are similar as with the FE_DGQ basis. However, for degrees starting at three, we see an increasing advantage for FE_DGQHermite, showing the effectiveness of these basis functions.

    +

    For $k=1$ and $k=2$, the Hermite-like basis functions do obviously not really pay off (indeed, for $k=1$ the polynomials are exactly the same as for FE_DGQ) and the results are similar as with the FE_DGQ basis. However, for degrees starting at three, we see an increasing advantage for FE_DGQHermite, showing the effectiveness of these basis functions.

    Possibilities for extension

    As mentioned in the introduction, the fast diagonalization method as realized here is tied to a Cartesian mesh with constant coefficients. When dealing with meshes that contain deformed cells or with variable coefficients, it is common to determine a nearby Cartesian mesh cell as an approximation. This can be done with the class TensorProductMatrixSymmetricSumCollection. Here, one can insert cell matrices similarly to the PreconditionBlockJacobi::initialize() function of this tutorial program. The benefit of the collection class is that cells on which the coefficient of the PDE has the same value can re-use the same Laplacian matrix, which reduces the memory consumption for the inverse matrices. As compared to the algorithm implemented in this tutorial program, one would define the length scales as the distances between opposing faces. For continuous elements, the code project <a href=https://github.com/peterrum/dealii-dd-and-schwarz">Cache-optimized and low-overhead implementations of multigrid smoothers for high-order FEM /usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html 2024-03-17 21:57:47.899261820 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html 2024-03-17 21:57:47.899261820 +0000 @@ -170,14 +170,14 @@ \|\nabla(u-u_h)\|_{\Omega} \le C h_\text{max}^p \| \nabla^{p+1} u \|_{\Omega}, \end{align*}" src="form_6004.png"/>

    -

    where $C$ is some constant independent of $h$ and $u$, $p$ is the polynomial degree of the finite element in use, and $h_\text{max}$ is the diameter of the largest cell. So if the largest cell is important, then why would we want to make the mesh fine in some parts of the domain but not all?

    +

    where $C$ is some constant independent of $h$ and $u$, $p$ is the polynomial degree of the finite element in use, and $h_\text{max}$ is the diameter of the largest cell. So if the largest cell is important, then why would we want to make the mesh fine in some parts of the domain but not all?

    The answer lies in the observation that the formula above is not optimal. In fact, some more work shows that the following is a better estimate (which you should compare to the square of the estimate above):

    \begin{align*}
   \|\nabla(u-u_h)\|_{\Omega}^2 \le C \sum_K h_K^{2p} \| \nabla^{p+1} u \|^2_K.
 \end{align*}

    (Because $h_K\le h_\text{max}$, this formula immediately implies the previous one if you just pull the mesh size out of the sum.) What this formula suggests is that it is not necessary to make the largest cell small, but that the cells really only need to be small where $\| \nabla^{p+1} u \|_K$ is large! In other words: The mesh really only has to be fine where the solution has large variations, as indicated by the $p+1$st derivative. This makes intuitive sense: if, for example, we use a linear element $p=1$, then places where the solution is nearly linear (as indicated by $\nabla^2 u$ being small) will be well resolved even if the mesh is coarse. Only those places where the second derivative is large will be poorly resolved by large elements, and consequently that's where we should make the mesh small.

    -

    Of course, this a priori estimate is not very useful in practice since we don't know the exact solution $u$ of the problem, and consequently, we cannot compute $\nabla^{p+1}u$. But, and that is the approach commonly taken, we can compute numerical approximations of $\nabla^{p+1}u$ based only on the discrete solution $u_h$ that we have computed before. We will discuss this in slightly more detail below. This will then help us determine which cells have a large $p+1$st derivative, and these are then candidates for refining the mesh.

    +

    Of course, this a priori estimate is not very useful in practice since we don't know the exact solution $u$ of the problem, and consequently, we cannot compute $\nabla^{p+1}u$. But, and that is the approach commonly taken, we can compute numerical approximations of $\nabla^{p+1}u$ based only on the discrete solution $u_h$ that we have computed before. We will discuss this in slightly more detail below. This will then help us determine which cells have a large $p+1$st derivative, and these are then candidates for refining the mesh.

    How to deal with hanging nodes in theory

    The methods using triangular meshes mentioned above go to great lengths to make sure that each vertex is a vertex of all adjacent cells – i.e., that there are no hanging nodes. This then automatically makes sure that we can define shape functions in such a way that they are globally continuous (if we use the common $Q_p$ Lagrange finite element methods we have been using so far in the tutorial programs, as represented by the FE_Q class).

    On the other hand, if we define shape functions on meshes with hanging nodes, we may end up with shape functions that are not continuous. To see this, think about the situation above where the top right cell is not refined, and consider for a moment the use of a bilinear finite element. In that case, the shape functions associated with the hanging nodes are defined in the obvious way on the two small cells adjacent to each of the hanging nodes. But how do we extend them to the big adjacent cells? Clearly, the function's extension to the big cell cannot be bilinear because then it needs to be linear along each edge of the large cell, and that means that it needs to be zero on the entire edge because it needs to be zero on the two vertices of the large cell on that edge. But it is not zero at the hanging node itself when seen from the small cells' side – so it is not continuous. The following three figures show three of the shape functions along the edges in question that turn out to not be continuous when defined in the usual way simply based on the cells they are adjacent to:

    @@ -193,7 +193,7 @@
    A discontinuous shape function adjacent to a hanging node
    -

    But we do want the finite element solution to be continuous so that we have a “conforming finite element method” where the discrete finite element space is a proper subset of the $H^1$ function space in which we seek the solution of the Laplace equation. To guarantee that the global solution is continuous at these nodes as well, we have to state some additional constraints on the values of the solution at these nodes. The trick is to realize that while the shape functions shown above are discontinuous (and consequently an arbitrary linear combination of them is also discontinuous), that linear combinations in which the shape functions are added up as $u_h(\mathbf x)=\sum_j U_j \varphi_j(\mathbf x)$ can be continuous if the coefficients $U_j$ satisfy certain relationships. In other words, the coefficients $U_j$ can not be chosen arbitrarily but have to satisfy certain constraints so that the function $u_h$ is in fact continuous. What these constraints have to look is relatively easy to understand conceptually, but the implementation in software is complicated and takes several thousand lines of code. On the other hand, in user code, it is only about half a dozen lines you have to add when dealing with hanging nodes.

    +

    But we do want the finite element solution to be continuous so that we have a “conforming finite element method” where the discrete finite element space is a proper subset of the $H^1$ function space in which we seek the solution of the Laplace equation. To guarantee that the global solution is continuous at these nodes as well, we have to state some additional constraints on the values of the solution at these nodes. The trick is to realize that while the shape functions shown above are discontinuous (and consequently an arbitrary linear combination of them is also discontinuous), that linear combinations in which the shape functions are added up as $u_h(\mathbf x)=\sum_j U_j \varphi_j(\mathbf x)$ can be continuous if the coefficients $U_j$ satisfy certain relationships. In other words, the coefficients $U_j$ can not be chosen arbitrarily but have to satisfy certain constraints so that the function $u_h$ is in fact continuous. What these constraints have to look is relatively easy to understand conceptually, but the implementation in software is complicated and takes several thousand lines of code. On the other hand, in user code, it is only about half a dozen lines you have to add when dealing with hanging nodes.

    In the program below, we will show how we can get these constraints from deal.II, and how to use them in the solution of the linear system of equations. Before going over the details of the program below, you may want to take a look at the Constraints on degrees of freedom documentation module that explains how these constraints can be computed and what classes in deal.II work on them.

    How to deal with hanging nodes in practice

    The practice of hanging node constraints is rather simpler than the theory we have outlined above. In reality, you will really only have to add about half a dozen lines of additional code to a program like step-4 to make it work with adaptive meshes that have hanging nodes. The interesting part about this is that it is entirely independent of the equation you are solving: The algebraic nature of these constraints has nothing to do with the equation and only depends on the choice of finite element. As a consequence, the code to deal with these constraints is entirely contained in the deal.II library itself, and you do not need to worry about the details.

    @@ -206,11 +206,11 @@

    These four steps are really all that is necessary – it's that simple from a user perspective. The fact that, in the function calls mentioned above, you will run through several thousand lines of not-so-trivial code is entirely immaterial to this: In user code, there are really only four additional steps.

    How we obtain locally refined meshes

    The next question, now that we know how to deal with meshes that have these hanging nodes is how we obtain them.

    -

    A simple way has already been shown in step-1: If you know where it is necessary to refine the mesh, then you can create one by hand. But in reality, we don't know this: We don't know the solution of the PDE up front (because, if we did, we wouldn't have to use the finite element method), and consequently we do not know where it is necessary to add local mesh refinement to better resolve areas where the solution has strong variations. But the discussion above shows that maybe we can get away with using the discrete solution $u_h$ on one mesh to estimate the derivatives $\nabla^{p+1} u$, and then use this to determine which cells are too large and which already small enough. We can then generate a new mesh from the current one using local mesh refinement. If necessary, this step is then repeated until we are happy with our numerical solution – or, more commonly, until we run out of computational resources or patience.

    +

    A simple way has already been shown in step-1: If you know where it is necessary to refine the mesh, then you can create one by hand. But in reality, we don't know this: We don't know the solution of the PDE up front (because, if we did, we wouldn't have to use the finite element method), and consequently we do not know where it is necessary to add local mesh refinement to better resolve areas where the solution has strong variations. But the discussion above shows that maybe we can get away with using the discrete solution $u_h$ on one mesh to estimate the derivatives $\nabla^{p+1} u$, and then use this to determine which cells are too large and which already small enough. We can then generate a new mesh from the current one using local mesh refinement. If necessary, this step is then repeated until we are happy with our numerical solution – or, more commonly, until we run out of computational resources or patience.

    So that's exactly what we will do. The locally refined grids are produced using an error estimator which estimates the energy error for numerical solutions of the Laplace operator. Since it was developed by Kelly and co-workers, we often refer to it as the “Kelly refinement indicator” in the library, documentation, and mailing list. The class that implements it is called KellyErrorEstimator, and there is a great deal of information to be found in the documentation of that class that need not be repeated here. The summary, however, is that the class computes a vector with as many entries as there are active cells, and where each entry contains an estimate of the error on that cell. This estimate is then used to refine the cells of the mesh: those cells that have a large error will be marked for refinement, those that have a particularly small estimate will be marked for coarsening. We don't have to do this by hand: The functions in namespace GridRefinement will do all of this for us once we have obtained the vector of error estimates.

    It is worth noting that while the Kelly error estimator was developed for Laplace's equation, it has proven to be a suitable tool to generate locally refined meshes for a wide range of equations, not even restricted to elliptic only problems. Although it will create non-optimal meshes for other equations, it is often a good way to quickly produce meshes that are well adapted to the features of solutions, such as regions of great variation or discontinuities.

    Boundary conditions

    -

    It turns out that one can see Dirichlet boundary conditions as just another constraint on the degrees of freedom. It's a particularly simple one, indeed: If $j$ is a degree of freedom on the boundary, with position $\mathbf x_j$, then imposing the boundary condition $u=g$ on $\partial\Omega$ simply yields the constraint $U_j=g({\mathbf x}_j)$.

    +

    It turns out that one can see Dirichlet boundary conditions as just another constraint on the degrees of freedom. It's a particularly simple one, indeed: If $j$ is a degree of freedom on the boundary, with position $\mathbf x_j$, then imposing the boundary condition $u=g$ on $\partial\Omega$ simply yields the constraint $U_j=g({\mathbf x}_j)$.

    The AffineConstraints class can handle such constraints as well, which makes it convenient to let the same object we use for hanging node constraints also deal with these Dirichlet boundary conditions. This way, we don't need to apply the boundary conditions after assembly (like we did in the earlier steps). All that is necessary is that we call the variant of VectorTools::interpolate_boundary_values() that returns its information in an AffineConstraints object, rather than the std::map we have used in previous tutorial programs.

    Other things this program shows

    Since the concepts used for locally refined grids are so important, we do not show much other material in this example. The most important exception is that we show how to use biquadratic elements instead of the bilinear ones which we have used in all previous examples. In fact, the use of higher order elements is accomplished by only replacing three lines of the program, namely the initialization of the fe member variable in the constructor of the main class of this program, and the use of an appropriate quadrature formula in two places. The rest of the program is unchanged.

    @@ -667,8 +667,8 @@

    As we can see, all preconditioners behave pretty much the same on this simple problem, with the number of iterations growing like ${\cal
-O}(N^{1/2})$ and because each iteration requires around ${\cal
-O}(N)$ operations the total CPU time grows like ${\cal
+O}(N^{1/2})$ and because each iteration requires around ${\cal
+O}(N)$ operations the total CPU time grows like ${\cal
 O}(N^{3/2})$ (for the few smallest meshes, the CPU time is so small that it doesn't record). Note that even though it is the simplest method, Jacobi is the fastest for this problem.

    The situation changes slightly when the finite element is not a bi-quadratic one (i.e., polynomial degree two) as selected in the constructor of this program, but a bi-linear one (polynomial degree one). If one makes this change, the results are as follows:

    @@ -676,7 +676,7 @@

    In other words, while the increase in iterations and CPU time is as before, Jacobi is now the method that requires the most iterations; it is still the fastest one, however, owing to the simplicity of the operations it has to perform. This is not to say that Jacobi is actually a good preconditioner – for problems of appreciable size, it is definitely not, and other methods will be substantially better – but really only that it is fast because its implementation is so simple that it can compensate for a larger number of iterations.

    -

    The message to take away from this is not that simplicity in preconditioners is always best. While this may be true for the current problem, it definitely is not once we move to more complicated problems (elasticity or Stokes, for examples step-8 or step-22). Secondly, all of these preconditioners still lead to an increase in the number of iterations as the number $N$ of degrees of freedom grows, for example ${\cal O}(N^\alpha)$; this, in turn, leads to a total growth in effort as ${\cal O}(N^{1+\alpha})$ since each iteration takes ${\cal O}(N)$ work. This behavior is undesirable: we would really like to solve linear systems with $N$ unknowns in a total of ${\cal O}(N)$ work; there is a class of preconditioners that can achieve this, namely geometric (step-16, step-37, step-39) or algebraic multigrid (step-31, step-40, and several others) preconditioners. They are, however, significantly more complex than the preconditioners outlined above, and so we will leave their use to these later tutorial programs. The point to make, however, is that "real" finite element programs do not use the preconditioners we mention above: These are simply shown for expository purposes.

    +

    The message to take away from this is not that simplicity in preconditioners is always best. While this may be true for the current problem, it definitely is not once we move to more complicated problems (elasticity or Stokes, for examples step-8 or step-22). Secondly, all of these preconditioners still lead to an increase in the number of iterations as the number $N$ of degrees of freedom grows, for example ${\cal O}(N^\alpha)$; this, in turn, leads to a total growth in effort as ${\cal O}(N^{1+\alpha})$ since each iteration takes ${\cal O}(N)$ work. This behavior is undesirable: we would really like to solve linear systems with $N$ unknowns in a total of ${\cal O}(N)$ work; there is a class of preconditioners that can achieve this, namely geometric (step-16, step-37, step-39) or algebraic multigrid (step-31, step-40, and several others) preconditioners. They are, however, significantly more complex than the preconditioners outlined above, and so we will leave their use to these later tutorial programs. The point to make, however, is that "real" finite element programs do not use the preconditioners we mention above: These are simply shown for expository purposes.

    Finally, the last message to take home is that when the data shown above was generated (in 2018), linear systems with 100,000 unknowns are easily solved on a desktop or laptop machine in about a second, making the solution of relatively simple 2d problems even to very high accuracy not that big a task as it used to be in the past. At the same time, the situation for 3d problems continues to be quite different: A uniform 2d mesh with 100,000 unknowns corresponds to a grid with about $300 \times 300$ nodes; the corresponding 3d mesh has $300 \times 300 \times 300$ nodes and 30 million unknowns. Because finite element matrices in 3d have many more nonzero entries than in 2d, solving these linear systems will not only take 300 times as much CPU time, but substantially longer. In other words, achieving the same resolution in 3d is quite a large problem, and solving it within a reasonable amount of time will require much more work to implement better linear solvers. As mentioned above, multigrid methods and matrix-free methods (see, for example, step-37), along with parallelization (step-40) will be necessary, but are then also able to comfortably solve such linear systems.

    A better mesh

    If you look at the meshes above, you will see even though the domain is the unit disk, and the jump in the coefficient lies along a circle, the cells that make up the mesh do not track this geometry well. The reason, already hinted at in step-1, is that in the absence of other information, the Triangulation class only sees a bunch of coarse grid cells but has, of course, no real idea what kind of geometry they might represent when looked at together. For this reason, we need to tell the Triangulation what to do when a cell is refined: where should the new vertices at the edge midpoints and the cell midpoint be located so that the child cells better represent the desired geometry than the parent cell.

    @@ -794,15 +794,15 @@ -\Delta u = f \]" src="form_6025.png"/>

    -

    on smoothly bounded, convex domains are known to be smooth themselves. The exact degree of smoothness, i.e., the function space in which the solution lives, depends on how smooth exactly the boundary of the domain is, and how smooth the right hand side is. Some regularity of the solution may be lost at the boundary, but one generally has that the solution is twice more differentiable in compact subsets of the domain than the right hand side. If, in particular, the right hand side satisfies $f\in C^\infty(\Omega)$, then $u \in C^\infty(\Omega_i)$ where $\Omega_i$ is any compact subset of $\Omega$ ( $\Omega$ is an open domain, so a compact subset needs to keep a positive distance from $\partial\Omega$).

    -

    The situation we chose for the current example is different, however: we look at an equation with a non-constant coefficient $a(\mathbf x)$:

    +

    on smoothly bounded, convex domains are known to be smooth themselves. The exact degree of smoothness, i.e., the function space in which the solution lives, depends on how smooth exactly the boundary of the domain is, and how smooth the right hand side is. Some regularity of the solution may be lost at the boundary, but one generally has that the solution is twice more differentiable in compact subsets of the domain than the right hand side. If, in particular, the right hand side satisfies $f\in C^\infty(\Omega)$, then $u \in C^\infty(\Omega_i)$ where $\Omega_i$ is any compact subset of $\Omega$ ( $\Omega$ is an open domain, so a compact subset needs to keep a positive distance from $\partial\Omega$).

    +

    The situation we chose for the current example is different, however: we look at an equation with a non-constant coefficient $a(\mathbf x)$:

    \[
   -\nabla \cdot (a \nabla u) = f.
 \]

    -

    Here, if $a$ is not smooth, then the solution will not be smooth either, regardless of $f$. In particular, we expect that wherever $a$ is discontinuous along a line (or along a plane in 3d), the solution will have a kink. This is easy to see: if for example $f$ is continuous, then $f=-\nabla \cdot (a \nabla u)$ needs to be continuous. This means that $a \nabla u$ must be continuously differentiable (not have a kink). Consequently, if $a$ has a discontinuity, then $\nabla u$ must have an opposite discontinuity so that the two exactly cancel and their product yields a function without a discontinuity. But for $\nabla u$ to have a discontinuity, $u$ must have a kink. This is of course exactly what is happening in the current example, and easy to observe in the pictures of the solution.

    -

    In general, if the coefficient $a(\mathbf x)$ is discontinuous along a line in 2d, or a plane in 3d, then the solution may have a kink, but the gradient of the solution will not go to infinity. That means, that the solution is at least still in the Sobolev space $W^{1,\infty}$ (i.e., roughly speaking, in the space of functions whose derivatives are bounded). On the other hand, we know that in the most extreme cases – i.e., where the domain has reentrant corners, the right hand side only satisfies $f\in H^{-1}$, or the coefficient $a$ is only in $L^\infty$ – all we can expect is that $u\in H^1$ (i.e., the Sobolev space of functions whose derivative is square integrable), a much larger space than $W^{1,\infty}$. It is not very difficult to create cases where the solution is in a space $H^{1+s}$ where we can get $s$ to become as small as we want. Such cases are often used to test adaptive finite element methods because the mesh will have to resolve the singularity that causes the solution to not be in $W^{1,\infty}$ any more.

    -

    The typical example one uses for this is called the Kellogg problem (referring to [Kel74]), which in the commonly used form has a coefficient $a(\mathbf x)$ that has different values in the four quadrants of the plane (or eight different values in the octants of ${\mathbb R}^3$). The exact degree of regularity (the $s$ in the index of the Sobolev space above) depends on the values of $a(\mathbf x)$ coming together at the origin, and by choosing the jumps large enough, the regularity of the solution can be made as close as desired to $H^1$.

    +

    Here, if $a$ is not smooth, then the solution will not be smooth either, regardless of $f$. In particular, we expect that wherever $a$ is discontinuous along a line (or along a plane in 3d), the solution will have a kink. This is easy to see: if for example $f$ is continuous, then $f=-\nabla \cdot (a \nabla u)$ needs to be continuous. This means that $a \nabla u$ must be continuously differentiable (not have a kink). Consequently, if $a$ has a discontinuity, then $\nabla u$ must have an opposite discontinuity so that the two exactly cancel and their product yields a function without a discontinuity. But for $\nabla u$ to have a discontinuity, $u$ must have a kink. This is of course exactly what is happening in the current example, and easy to observe in the pictures of the solution.

    +

    In general, if the coefficient $a(\mathbf x)$ is discontinuous along a line in 2d, or a plane in 3d, then the solution may have a kink, but the gradient of the solution will not go to infinity. That means, that the solution is at least still in the Sobolev space $W^{1,\infty}$ (i.e., roughly speaking, in the space of functions whose derivatives are bounded). On the other hand, we know that in the most extreme cases – i.e., where the domain has reentrant corners, the right hand side only satisfies $f\in H^{-1}$, or the coefficient $a$ is only in $L^\infty$ – all we can expect is that $u\in H^1$ (i.e., the Sobolev space of functions whose derivative is square integrable), a much larger space than $W^{1,\infty}$. It is not very difficult to create cases where the solution is in a space $H^{1+s}$ where we can get $s$ to become as small as we want. Such cases are often used to test adaptive finite element methods because the mesh will have to resolve the singularity that causes the solution to not be in $W^{1,\infty}$ any more.

    +

    The typical example one uses for this is called the Kellogg problem (referring to [Kel74]), which in the commonly used form has a coefficient $a(\mathbf x)$ that has different values in the four quadrants of the plane (or eight different values in the octants of ${\mathbb R}^3$). The exact degree of regularity (the $s$ in the index of the Sobolev space above) depends on the values of $a(\mathbf x)$ coming together at the origin, and by choosing the jumps large enough, the regularity of the solution can be made as close as desired to $H^1$.

    To implement something like this, one could replace the coefficient function by the following (shown here only for the 2d case):

    template <int dim>
    double coefficient (const Point<dim> &p)
    {
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 2024-03-17 21:57:47.979262314 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 2024-03-17 21:57:47.987262363 +0000 @@ -132,24 +132,24 @@
    Note
    If you use this program as a basis for your own work, please consider citing it in your list of references. The initial version of this work was contributed to the deal.II project by the authors listed in the following citation: 10.5281/zenodo.1243280

    Introduction

    Non-matching grid constraints through distributed Lagrange multipliers

    -

    In this tutorial we consider the case of two domains, $\Omega$ in $R^{\text{spacedim}}$ and $\Gamma$ in $R^{\text{dim}}$, where $\Gamma$ is embedded in $\Omega$ ( $\Gamma \subseteq \Omega$). We want to solve a partial differential equation on $\Omega$, enforcing some conditions on the solution of the problem on the embedded domain $\Gamma$.

    +

    In this tutorial we consider the case of two domains, $\Omega$ in $R^{\text{spacedim}}$ and $\Gamma$ in $R^{\text{dim}}$, where $\Gamma$ is embedded in $\Omega$ ( $\Gamma \subseteq \Omega$). We want to solve a partial differential equation on $\Omega$, enforcing some conditions on the solution of the problem on the embedded domain $\Gamma$.

    There are two interesting scenarios:

    -

    In both cases define the restriction operator $\gamma$ as the operator that, given a continuous function on $\Omega$, returns its (continuous) restriction on $\Gamma$, i.e.,

    +

    In both cases define the restriction operator $\gamma$ as the operator that, given a continuous function on $\Omega$, returns its (continuous) restriction on $\Gamma$, i.e.,

    \[
 \gamma : C^0(\Omega) \mapsto C^0(\Gamma), \quad \text{ s.t. } \gamma u = u|_{\Gamma} \in C^0(\Gamma),
 \quad \forall u \in C^0(\Omega).
 \]

    -

    It is well known that the operator $\gamma$ can be extended to a continuous operator on $H^1(\Omega)$, mapping functions in $H^1(\Omega)$ to functions in $H^1(\Gamma)$ when the intrinsic dimension of $\Gamma$ is the same of $\Omega$.

    -

    The same is true, with a less regular range space (namely $H^{1/2}(\Gamma)$), when the dimension of $\Gamma$ is one less with respect to $\Omega$, and $\Gamma$ does not have a boundary. In this second case, the operator $\gamma$ is also known as the trace operator, and it is well defined for Lipschitz co-dimension one curves and surfaces $\Gamma$ embedded in $\Omega$ (read this wikipedia article for further details on the trace operator).

    -

    The co-dimension two case is a little more complicated, and in general it is not possible to construct a continuous trace operator, not even from $H^1(\Omega)$ to $L^2(\Gamma)$, when the dimension of $\Gamma$ is zero or one respectively in two and three dimensions.

    -

    In this tutorial program we're not interested in further details on $\gamma$: we take the extension $\gamma$ for granted, assuming that the dimension of the embedded domain (dim) is always smaller by one or equal with respect to the dimension of the embedding domain $\Omega$ (spacedim).

    -

    We are going to solve the following differential problem: given a sufficiently regular function $g$ on $\Gamma$, a forcing term $f \in L^2(\Omega)$ and a Dirichlet boundary condition $u_D$ on $\partial \Omega$, find the solution $u$ to

    +

    It is well known that the operator $\gamma$ can be extended to a continuous operator on $H^1(\Omega)$, mapping functions in $H^1(\Omega)$ to functions in $H^1(\Gamma)$ when the intrinsic dimension of $\Gamma$ is the same of $\Omega$.

    +

    The same is true, with a less regular range space (namely $H^{1/2}(\Gamma)$), when the dimension of $\Gamma$ is one less with respect to $\Omega$, and $\Gamma$ does not have a boundary. In this second case, the operator $\gamma$ is also known as the trace operator, and it is well defined for Lipschitz co-dimension one curves and surfaces $\Gamma$ embedded in $\Omega$ (read this wikipedia article for further details on the trace operator).

    +

    The co-dimension two case is a little more complicated, and in general it is not possible to construct a continuous trace operator, not even from $H^1(\Omega)$ to $L^2(\Gamma)$, when the dimension of $\Gamma$ is zero or one respectively in two and three dimensions.

    +

    In this tutorial program we're not interested in further details on $\gamma$: we take the extension $\gamma$ for granted, assuming that the dimension of the embedded domain (dim) is always smaller by one or equal with respect to the dimension of the embedding domain $\Omega$ (spacedim).

    +

    We are going to solve the following differential problem: given a sufficiently regular function $g$ on $\Gamma$, a forcing term $f \in L^2(\Omega)$ and a Dirichlet boundary condition $u_D$ on $\partial \Omega$, find the solution $u$ to

    \begin{eqnarray*}
 - \Delta u + \gamma^T \lambda &=& f  \text{ in } \Omega\\
@@ -157,35 +157,35 @@
 u & = & u_D \text{ on } \partial\Omega.
 \end{eqnarray*}

    -

    This is a constrained problem, where we are looking for a function $u$ that solves the Poisson equation and that satisfies Dirichlet boundary conditions $u=u_D$ on $\partial \Omega$, subject to the constraint $\gamma u = g$ using a Lagrange multiplier.

    -

    When $f=0$ this problem has a physical interpretation: harmonic functions, i.e., functions that satisfy the Laplace equation, can be thought of as the displacements of a membrane whose boundary values are prescribed. The current situation then corresponds to finding the shape of a membrane for which not only the displacement at the boundary, but also on $\Gamma$ is prescribed. For example, if $\Gamma$ is a closed curve in 2d space, then that would model a soap film that is held in place by a wire loop along $\partial \Omega$ as well as a second loop along $\Gamma$. In cases where $\Gamma$ is a whole area, you can think of this as a membrane that is stretched over an obstacle where $\Gamma$ is the contact area. (If the contact area is not known we have a different problem – called the "obstacle problem" – which is modeled in step-41.)

    -

    As a first example we study the zero Dirichlet boundary condition on $\partial\Omega$. The same equations apply if we apply zero Neumann boundary conditions on $\partial\Omega$ or a mix of the two.

    -

    The variational formulation can be derived by introducing two infinite dimensional spaces $V(\Omega)$ and $Q^*(\Gamma)$, respectively for the solution $u$ and for the Lagrange multiplier $\lambda$.

    +

    This is a constrained problem, where we are looking for a function $u$ that solves the Poisson equation and that satisfies Dirichlet boundary conditions $u=u_D$ on $\partial \Omega$, subject to the constraint $\gamma u = g$ using a Lagrange multiplier.

    +

    When $f=0$ this problem has a physical interpretation: harmonic functions, i.e., functions that satisfy the Laplace equation, can be thought of as the displacements of a membrane whose boundary values are prescribed. The current situation then corresponds to finding the shape of a membrane for which not only the displacement at the boundary, but also on $\Gamma$ is prescribed. For example, if $\Gamma$ is a closed curve in 2d space, then that would model a soap film that is held in place by a wire loop along $\partial \Omega$ as well as a second loop along $\Gamma$. In cases where $\Gamma$ is a whole area, you can think of this as a membrane that is stretched over an obstacle where $\Gamma$ is the contact area. (If the contact area is not known we have a different problem – called the "obstacle problem" – which is modeled in step-41.)

    +

    As a first example we study the zero Dirichlet boundary condition on $\partial\Omega$. The same equations apply if we apply zero Neumann boundary conditions on $\partial\Omega$ or a mix of the two.

    +

    The variational formulation can be derived by introducing two infinite dimensional spaces $V(\Omega)$ and $Q^*(\Gamma)$, respectively for the solution $u$ and for the Lagrange multiplier $\lambda$.

    Multiplying the first equation by $v \in V(\Omega)$ and the second by $q \in
-Q(\Gamma)$, integrating by parts when possible, and exploiting the boundary conditions on $\partial\Omega$, we obtain the following variational problem:

    -

    Given a sufficiently regular function $g$ on $\Gamma$, find the solution $u$ to

    +Q(\Gamma)$" src="form_6049.png"/>, integrating by parts when possible, and exploiting the boundary conditions on $\partial\Omega$, we obtain the following variational problem:

    +

    Given a sufficiently regular function $g$ on $\Gamma$, find the solution $u$ to

    \begin{eqnarray*}
 (\nabla u, \nabla v)_{\Omega} + (\lambda, \gamma v)_{\Gamma} &=& (f,v)_{\Omega} \qquad \forall v \in V(\Omega) \\
 (\gamma u, q)_{\Gamma} &=& (g,q)_{\Gamma} \qquad \forall q \in Q(\Gamma),
 \end{eqnarray*}

    -

    where $(\cdot, \cdot)_{\Omega}$ and $(\cdot, \cdot)_{\Gamma}$ represent, respectively, $L^2$ scalar products in $\Omega$ and in $\Gamma$.

    -

    Inspection of the variational formulation tells us that the space $V(\Omega)$ can be taken to be $H^1_0(\Omega)$. The space $Q(\Gamma)$, in the co-dimension zero case, should be taken as $H^1(\Gamma)$, while in the co-dimension one case should be taken as $H^{1/2}(\Gamma)$.

    -

    The function $g$ should therefore be either in $H^1(\Gamma)$ (for the co-dimension zero case) or $H^{1/2}(\Gamma)$ (for the co-dimension one case). This leaves us with a Lagrange multiplier $\lambda$ in $Q^*(\Gamma)$, which is either $H^{-1}(\Gamma)$ or $H^{-1/2}(\Gamma)$.

    -

    There are two options for the discretization of the problem above. One could choose matching discretizations, where the Triangulation for $\Gamma$ is aligned with the Triangulation for $\Omega$, or one could choose to discretize the two domains in a completely independent way.

    -

    The first option is clearly more indicated for the simple problem we proposed above: it is sufficient to use a single Triangulation for $\Omega$ and then impose certain constraints depending $\Gamma$. An example of this approach is studied in step-40, where the solution has to stay above an obstacle and this is achieved imposing constraints on $\Omega$.

    +

    where $(\cdot, \cdot)_{\Omega}$ and $(\cdot, \cdot)_{\Gamma}$ represent, respectively, $L^2$ scalar products in $\Omega$ and in $\Gamma$.

    +

    Inspection of the variational formulation tells us that the space $V(\Omega)$ can be taken to be $H^1_0(\Omega)$. The space $Q(\Gamma)$, in the co-dimension zero case, should be taken as $H^1(\Gamma)$, while in the co-dimension one case should be taken as $H^{1/2}(\Gamma)$.

    +

    The function $g$ should therefore be either in $H^1(\Gamma)$ (for the co-dimension zero case) or $H^{1/2}(\Gamma)$ (for the co-dimension one case). This leaves us with a Lagrange multiplier $\lambda$ in $Q^*(\Gamma)$, which is either $H^{-1}(\Gamma)$ or $H^{-1/2}(\Gamma)$.

    +

    There are two options for the discretization of the problem above. One could choose matching discretizations, where the Triangulation for $\Gamma$ is aligned with the Triangulation for $\Omega$, or one could choose to discretize the two domains in a completely independent way.

    +

    The first option is clearly more indicated for the simple problem we proposed above: it is sufficient to use a single Triangulation for $\Omega$ and then impose certain constraints depending $\Gamma$. An example of this approach is studied in step-40, where the solution has to stay above an obstacle and this is achieved imposing constraints on $\Omega$.

    To solve more complex problems, for example one where the domain $\Gamma$ is time dependent, the second option could be a more viable solution. Handling non aligned meshes is complex by itself: to illustrate how is done we study a simple problem.

    The technique we describe here is presented in the literature using one of many names: the immersed finite element method, the fictitious boundary method, the distributed Lagrange multiplier method, and others. The main principle is that the discretization of the two grids and of the two finite element spaces are kept completely independent. This technique is particularly efficient for the simulation of fluid-structure interaction problems, where the configuration of the embedded structure is part of the problem itself, and one solves a (possibly non-linear) elastic problem to determine the (time dependent) configuration of $\Gamma$, and a (possibly non-linear) flow problem in $\Omega
 \setminus \Gamma$, plus coupling conditions on the interface between the fluid and the solid.

    In this tutorial program we keep things a little simpler, and we assume that the configuration of the embedded domain is given in one of two possible ways:

    -

    We define the embedded reference domain $\Gamma_0$ embedded_grid: on this triangulation we construct a finite dimensional space (embedded_configuration_dh) to describe either the deformation or the displacement through a FiniteElement system of FE_Q objects (embedded_configuration_fe). This finite dimensional space is used only to interpolate a user supplied function (embedded_configuration_function) representing either $\psi$ (if the parameter use_displacement is set to false) or $\delta\psi$ (if the parameter use_displacement is set to true).

    -

    The Lagrange multiplier $\lambda$ and the user supplied function $g$ are defined through another finite dimensional space embedded_dh, and through another FiniteElement embedded_fe, using the same reference domain. In order to take into account the deformation of the domain, either a MappingFEField or a MappingQEulerian object are initialized with the embedded_configuration vector.

    +

    We define the embedded reference domain $\Gamma_0$ embedded_grid: on this triangulation we construct a finite dimensional space (embedded_configuration_dh) to describe either the deformation or the displacement through a FiniteElement system of FE_Q objects (embedded_configuration_fe). This finite dimensional space is used only to interpolate a user supplied function (embedded_configuration_function) representing either $\psi$ (if the parameter use_displacement is set to false) or $\delta\psi$ (if the parameter use_displacement is set to true).

    +

    The Lagrange multiplier $\lambda$ and the user supplied function $g$ are defined through another finite dimensional space embedded_dh, and through another FiniteElement embedded_fe, using the same reference domain. In order to take into account the deformation of the domain, either a MappingFEField or a MappingQEulerian object are initialized with the embedded_configuration vector.

    In the embedding space, a standard finite dimensional space space_dh is constructed on the embedding grid space_grid, using the FiniteElement space_fe, following almost verbatim the approach taken in step-6.

    -

    We represent the discretizations of the spaces $V$ and $Q$ with

    +

    We represent the discretizations of the spaces $V$ and $Q$ with

    \[
 V_h(\Omega) = \text{span} \{v_i\}_{i=1}^n
 \] @@ -195,7 +195,7 @@ Q_h(\Gamma) = \text{span} \{q_i\}_{i=1}^m \]" src="form_6065.png"/>

    -

    respectively, where $n$ is the dimension of space_dh, and $m$ the dimension of embedded_dh.

    +

    respectively, where $n$ is the dimension of space_dh, and $m$ the dimension of embedded_dh.

    Once all the finite dimensional spaces are defined, the variational formulation of the problem above leaves us with the following finite dimensional system of equations:

    \[
@@ -223,7 +223,7 @@
 G_{\alpha} &\dealcoloneq& (g, q_\alpha)_\Gamma \qquad \alpha = 1,\dots, m.
 \end{eqnarray*}

    -

    While the matrix $K$ is the standard stiffness matrix for the Poisson problem on $\Omega$, and the vector $G$ is a standard right-hand-side vector for a finite element problem with forcing term $g$ on $\Gamma$, (see, for example, step-3), the matrix $C$ or its transpose $C^T$ are non-standard since they couple information on two non-matching grids.

    +

    While the matrix $K$ is the standard stiffness matrix for the Poisson problem on $\Omega$, and the vector $G$ is a standard right-hand-side vector for a finite element problem with forcing term $g$ on $\Gamma$, (see, for example, step-3), the matrix $C$ or its transpose $C^T$ are non-standard since they couple information on two non-matching grids.

    In particular, the integral that appears in the computation of a single entry of $C$, is computed on $\Gamma$. As usual in finite elements we split this integral into contributions from all cells of the triangulation used to discretize $\Gamma$, we transform the integral on $K$ to an integral on the reference element $\hat K$, where $F_{K}$ is the mapping from $\hat K$ to $K$, and compute the integral on $\hat K$ using a quadrature formula:

    \[
@@ -233,14 +233,14 @@
 \]

    Computing this sum is non-trivial because we have to evaluate $(v_j \circ F_{K})
-(\hat x_i)$. In general, if $\Gamma$ and $\Omega$ are not aligned, the point $F_{K}(\hat x_i)$ is completely arbitrary with respect to $\Omega$, and unless we figure out a way to interpolate all basis functions of $V_h(\Omega)$ on an arbitrary point on $\Omega$, we cannot compute the integral needed for an entry of the matrix $C$.

    +(\hat x_i)$" src="form_6071.png"/>. In general, if $\Gamma$ and $\Omega$ are not aligned, the point $F_{K}(\hat x_i)$ is completely arbitrary with respect to $\Omega$, and unless we figure out a way to interpolate all basis functions of $V_h(\Omega)$ on an arbitrary point on $\Omega$, we cannot compute the integral needed for an entry of the matrix $C$.

    To evaluate $(v_j \circ F_{K}) (\hat x_i)$ the following steps needs to be taken (as shown in the picture below):

    @@ -259,8 +259,8 @@

    The problem we solve here is identical to step-4, with the difference that we impose some constraints on an embedded domain $\Gamma$. The tutorial is written in a dimension independent way, and in the results section we show how to vary both dim and spacedim.

    The tutorial is compiled for dim equal to one and spacedim equal to two. If you want to run the program in embedding dimension spacedim equal to three, you will most likely want to change the reference domain for $\Gamma$ to be, for example, something you read from file, or a closed sphere that you later deform to something more interesting.

    In the default scenario, $\Gamma$ has co-dimension one, and this tutorial program implements the Fictitious Boundary Method. As it turns out, the same techniques are used in the Variational Immersed Finite Element Method, and the coupling operator $C$ defined above is the same in almost all of these non-matching methods.

    -

    The embedded domain is assumed to be included in $\Omega$, which we take as the unit square $[0,1]^2$. The definition of the fictitious domain $\Gamma$ can be modified through the parameter file, and can be given as a mapping from the reference interval $[0,1]$ to a curve in $\Omega$.

    -

    If the curve is closed, then the results will be similar to running the same problem on a grid whose boundary is $\Gamma$. The program will happily run also with a non-closed $\Gamma$, although in those cases the mathematical formulation of the problem is more difficult, since $\Gamma$ will have a boundary by itself that has co-dimension two with respect to the domain $\Omega$.

    +

    The embedded domain is assumed to be included in $\Omega$, which we take as the unit square $[0,1]^2$. The definition of the fictitious domain $\Gamma$ can be modified through the parameter file, and can be given as a mapping from the reference interval $[0,1]$ to a curve in $\Omega$.

    +

    If the curve is closed, then the results will be similar to running the same problem on a grid whose boundary is $\Gamma$. The program will happily run also with a non-closed $\Gamma$, although in those cases the mathematical formulation of the problem is more difficult, since $\Gamma$ will have a boundary by itself that has co-dimension two with respect to the domain $\Omega$.

    References

    DistributedLagrangeProblem

    -

    In the DistributedLagrangeProblem, we need two parameters describing the dimensions of the domain $\Gamma$ (dim) and of the domain $\Omega$ (spacedim).

    -

    These will be used to initialize a Triangulation<dim,spacedim> (for $\Gamma$) and a Triangulation<spacedim,spacedim> (for $\Omega$).

    +

    In the DistributedLagrangeProblem, we need two parameters describing the dimensions of the domain $\Gamma$ (dim) and of the domain $\Omega$ (spacedim).

    +

    These will be used to initialize a Triangulation<dim,spacedim> (for $\Gamma$) and a Triangulation<spacedim,spacedim> (for $\Omega$).

    A novelty with respect to other tutorial programs is the heavy use of std::unique_ptr. These behave like classical pointers, with the advantage of doing automatic house-keeping: the contained object is automatically destroyed as soon as the unique_ptr goes out of scope, even if it is inside a container or there's an exception. Moreover it does not allow for duplicate pointers, which prevents ownership problems. We do this, because we want to be able to i) construct the problem, ii) read the parameters, and iii) initialize all objects according to what is specified in a parameter file.

    We construct the parameters of our problem in the internal class Parameters, derived from ParameterAcceptor. The DistributedLagrangeProblem class takes a const reference to a Parameters object, so that it is not possible to modify the parameters from within the DistributedLagrangeProblem class itself.

    We could have initialized the parameters first, and then pass the parameters to the DistributedLagrangeProblem assuming all entries are set to the desired values, but this has two disadvantages:

    @@ -357,16 +357,16 @@
     
    ParameterAcceptor
    Definition parameter_acceptor.h:361

    The parameters now described can all be set externally using a parameter file: if no parameter file is present when running the executable, the program will create a "parameters.prm" file with the default values defined here, and then abort to give the user a chance to modify the parameters.prm file.

    -

    Initial refinement for the embedding grid, corresponding to the domain $\Omega$.

    +

    Initial refinement for the embedding grid, corresponding to the domain $\Omega$.

      unsigned int initial_refinement = 4;
     
    -

    The interaction between the embedded grid $\Omega$ and the embedding grid $\Gamma$ is handled through the computation of $C$, which involves all cells of $\Omega$ overlapping with parts of $\Gamma$: a higher refinement of such cells might improve quality of our computations. For this reason we define delta_refinement: if it is greater than zero, then we mark each cell of the space grid that contains a vertex of the embedded grid and its neighbors, execute the refinement, and repeat this process delta_refinement times.

    +

    The interaction between the embedded grid $\Omega$ and the embedding grid $\Gamma$ is handled through the computation of $C$, which involves all cells of $\Omega$ overlapping with parts of $\Gamma$: a higher refinement of such cells might improve quality of our computations. For this reason we define delta_refinement: if it is greater than zero, then we mark each cell of the space grid that contains a vertex of the embedded grid and its neighbors, execute the refinement, and repeat this process delta_refinement times.

      unsigned int delta_refinement = 3;
     

    Starting refinement of the embedded grid, corresponding to the domain $\Gamma$.

      unsigned int initial_embedded_refinement = 8;
     
    -

    The list of boundary ids where we impose (possibly inhomogeneous) Dirichlet boundary conditions. On the remaining boundary ids (if any), we impose homogeneous Neumann boundary conditions. As a default problem we have zero Dirichlet boundary conditions on $\partial \Omega$

    +

    The list of boundary ids where we impose (possibly inhomogeneous) Dirichlet boundary conditions. On the remaining boundary ids (if any), we impose homogeneous Neumann boundary conditions. As a default problem we have zero Dirichlet boundary conditions on $\partial \Omega$

      std::list<types::boundary_id> dirichlet_ids{0, 1, 2, 3};
     

    FiniteElement degree of the embedding space: $V_h(\Omega)$

    @@ -445,7 +445,7 @@
      std::unique_ptr<Mapping<dim, spacedim>> embedded_mapping;
     
    ParameterAcceptorProxy
    Definition parameter_acceptor.h:699
    -

    We do the same thing to specify the value of the forcing term $f$. In this case the Function is a scalar one.

    +

    We do the same thing to specify the value of the forcing term $f$. In this case the Function is a scalar one.

      ParameterAcceptorProxy<Functions::ParsedFunction<spacedim>>
      embedding_rhs_function;
     
    @@ -594,7 +594,7 @@
      {
    STL namespace.

    Here is a way to set default values for a ParameterAcceptor class that was constructed using ParameterAcceptorProxy.

    -

    In this case, we set the default deformation of the embedded grid to be a circle with radius $R$ and center $(Cx, Cy)$, we set the default value for the embedded_value_function to be the constant one, and specify some sensible values for the SolverControl object.

    +

    In this case, we set the default deformation of the embedded grid to be a circle with radius $R$ and center $(Cx, Cy)$, we set the default value for the embedded_value_function to be the constant one, and specify some sensible values for the SolverControl object.

    It is fundamental for $\Gamma$ to be embedded: from the definition of $C_{\alpha j}$ is clear that, if $\Gamma \not\subseteq \Omega$, certain rows of the matrix $C$ will be zero. This would be a problem, as the Schur complement method requires $C$ to have full column rank.

      embedded_configuration_function.declare_parameters_call_back.connect(
      []() -> void {
    @@ -631,7 +631,7 @@
      TimerOutput::Scope timer_section(monitor, "Setup grids and dofs");
     
    Definition timer.h:558
    -

    Initializing $\Omega$: constructing the Triangulation and wrapping it into a std::unique_ptr object

    +

    Initializing $\Omega$: constructing the Triangulation and wrapping it into a std::unique_ptr object

      space_grid = std::make_unique<Triangulation<spacedim>>();
     

    Next, we actually create the triangulation using GridGenerator::hyper_cube(). The last argument is set to true: this activates colorization (i.e., assigning different boundary indicators to different parts of the boundary), which we use to assign the Dirichlet and Neumann conditions.

    @@ -682,7 +682,7 @@
     
      setup_embedded_dofs();
     
    -

    In this tutorial program we not only refine $\Omega$ globally, but also allow a local refinement depending on the position of $\Gamma$, according to the value of parameters.delta_refinement, that we use to decide how many rounds of local refinement we should do on $\Omega$, corresponding to the position of $\Gamma$.

    +

    In this tutorial program we not only refine $\Omega$ globally, but also allow a local refinement depending on the position of $\Gamma$, according to the value of parameters.delta_refinement, that we use to decide how many rounds of local refinement we should do on $\Omega$, corresponding to the position of $\Gamma$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html 2024-03-17 21:57:48.071262882 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html 2024-03-17 21:57:48.075262907 +0000 @@ -163,15 +163,15 @@ \qquad \mathbf{x} \in \Gamma^N, \end{align*}" src="form_6088.png"/>

    -

    where $\Omega \subset \mathbb{R}^n (n=2,3)$ is a bounded domain. In the context of the flow of a fluid through a porous medium, $p$ is the pressure, $\mathbf{K}$ is a permeability tensor, $f$ is the source term, and $p_D, u_N$ represent Dirichlet and Neumann boundary conditions. We can introduce a flux, $\mathbf{u} = -\mathbf{K} \nabla p$, that corresponds to the Darcy velocity (in the way we did in step-20) and this variable will be important in the considerations below.

    -

    In this program, we will consider a test case where the exact pressure is $p = \sin \left( \pi x\right)\sin\left(\pi y \right)$ on the unit square domain, with homogeneous Dirichelet boundary conditions and $\mathbf{K}$ the identity matrix. Then we will calculate $L_2$ errors of pressure, velocity, and flux.

    +

    where $\Omega \subset \mathbb{R}^n (n=2,3)$ is a bounded domain. In the context of the flow of a fluid through a porous medium, $p$ is the pressure, $\mathbf{K}$ is a permeability tensor, $f$ is the source term, and $p_D, u_N$ represent Dirichlet and Neumann boundary conditions. We can introduce a flux, $\mathbf{u} = -\mathbf{K} \nabla p$, that corresponds to the Darcy velocity (in the way we did in step-20) and this variable will be important in the considerations below.

    +

    In this program, we will consider a test case where the exact pressure is $p = \sin \left( \pi x\right)\sin\left(\pi y \right)$ on the unit square domain, with homogeneous Dirichelet boundary conditions and $\mathbf{K}$ the identity matrix. Then we will calculate $L_2$ errors of pressure, velocity, and flux.

    Weak Galerkin scheme

    -

    The Poisson equation above has a solution $p$ that needs to satisfy the weak formulation of the problem,

    +

    The Poisson equation above has a solution $p$ that needs to satisfy the weak formulation of the problem,

    \begin{equation*}
 \mathcal{A}\left(p,q \right) = \mathcal{F} \left(q \right),
 \end{equation*}

    -

    for all test functions $q$, where

    +

    for all test functions $q$, where

    \begin{equation*}
 \mathcal{A}\left(p,q\right)
   \dealcoloneq \int_\Omega \left(\mathbf{K} \nabla p\right) \cdot \nabla q \;\mathrm{d}x,
@@ -184,14 +184,14 @@
   - \int_{\Gamma^N} u_N q \; \mathrm{d}x.
 \end{equation*}

    -

    Here, we have integrated by parts in the bilinear form, and we are evaluating the gradient of $p,p$ in the interior and the values of $q$ on the boundary of the domain. All of this is well defined because we assume that the solution is in $H^1$ for which taking the gradient and evaluating boundary values are valid operations.

    -

    The idea of the weak Galerkin method is now to approximate the exact $p$ solution with a discontinuous function $p_h$. This function may only be discontinuous along interfaces between cells, and because we will want to evaluate this function also along interfaces, we have to prescribe not only what values it is supposed to have in the cell interiors but also its values along interfaces. We do this by saying that $p_h$ is actually a tuple, $p_h=(p^\circ,p^\partial)$, though it's really just a single function that is either equal to $p^\circ(x)$ or $p^\partial(x)$, depending on whether it is evaluated at a point $x$ that lies in the cell interior or on cell interfaces.

    -

    We would then like to simply stick this approximation into the bilinear form above. This works for the case where we have to evaluate the test function $q_h$ on the boundary (where we would simply take its interface part $q_h^\partial$) but we have to be careful with the gradient because that is only defined in cell interiors. Consequently, the weak Galerkin scheme for the Poisson equation is defined by

    +

    Here, we have integrated by parts in the bilinear form, and we are evaluating the gradient of $p,p$ in the interior and the values of $q$ on the boundary of the domain. All of this is well defined because we assume that the solution is in $H^1$ for which taking the gradient and evaluating boundary values are valid operations.

    +

    The idea of the weak Galerkin method is now to approximate the exact $p$ solution with a discontinuous function $p_h$. This function may only be discontinuous along interfaces between cells, and because we will want to evaluate this function also along interfaces, we have to prescribe not only what values it is supposed to have in the cell interiors but also its values along interfaces. We do this by saying that $p_h$ is actually a tuple, $p_h=(p^\circ,p^\partial)$, though it's really just a single function that is either equal to $p^\circ(x)$ or $p^\partial(x)$, depending on whether it is evaluated at a point $x$ that lies in the cell interior or on cell interfaces.

    +

    We would then like to simply stick this approximation into the bilinear form above. This works for the case where we have to evaluate the test function $q_h$ on the boundary (where we would simply take its interface part $q_h^\partial$) but we have to be careful with the gradient because that is only defined in cell interiors. Consequently, the weak Galerkin scheme for the Poisson equation is defined by

    \begin{equation*}
 \mathcal{A}_h\left(p_h,q \right) = \mathcal{F} \left(q_h \right),
 \end{equation*}

    -

    for all discrete test functions $q_h$, where

    +

    for all discrete test functions $q_h$, where

    \begin{equation*}
 \mathcal{A}_h\left(p_h,q_h\right)
   \dealcoloneq \sum_{K \in \mathbb{T}}
@@ -228,7 +228,7 @@
   p_h(\mathbf x) = \sum_j P_j \varphi_j(\mathbf x).
 \end{equation*}

    -

    Here, since $p_h$ has two components (the interior and the interface components), the same must hold true for the basis functions $\varphi_j(\mathbf x)$, which we can write as $\varphi_j = (\varphi_j^\circ,\varphi_j^\partial)$. If you've followed the descriptions in step-8, step-20, and the documentation module on vector-valued problems, it will be no surprise that for some values of $j$, $\varphi_j^\circ$ will be zero, whereas for other values of $j$, $\varphi_j^\partial$ will be zero – i.e., shape functions will be of either one or the other kind. That is not important, here, however. What is important is that we need to wonder how we can represent $\nabla_{w,d} \varphi_j$ because that is clearly what will appear in the problem when we want to implement the bilinear form

    +

    Here, since $p_h$ has two components (the interior and the interface components), the same must hold true for the basis functions $\varphi_j(\mathbf x)$, which we can write as $\varphi_j = (\varphi_j^\circ,\varphi_j^\partial)$. If you've followed the descriptions in step-8, step-20, and the documentation module on vector-valued problems, it will be no surprise that for some values of $j$, $\varphi_j^\circ$ will be zero, whereas for other values of $j$, $\varphi_j^\partial$ will be zero – i.e., shape functions will be of either one or the other kind. That is not important, here, however. What is important is that we need to wonder how we can represent $\nabla_{w,d} \varphi_j$ because that is clearly what will appear in the problem when we want to implement the bilinear form

    \begin{equation*}
 \mathcal{A}_h\left(p_h,q_h\right)
   = \sum_{K \in \mathbb{T}}
@@ -279,7 +279,7 @@
   \left(C^K\right)^T = \left(M^K\right)^{-1} G^K.
 \end{equation*}

    -

    (In this last step, we have assumed that the indices $i,j,k$ only range over those degrees of freedom active on cell $K$, thereby ensuring that the mass matrix on the space $RT_s(K)$ is invertible.) Equivalently, using the symmetry of the matrix $M$, we have that

    +

    (In this last step, we have assumed that the indices $i,j,k$ only range over those degrees of freedom active on cell $K$, thereby ensuring that the mass matrix on the space $RT_s(K)$ is invertible.) Equivalently, using the symmetry of the matrix $M$, we have that

    \begin{equation*}
   C^K = \left(G^K\right)^{T} \left(M^K\right)^{-1}.
 \end{equation*} @@ -315,7 +315,7 @@ \mathbf v_l|_K. \end{equation*}" src="form_6142.png"/>

    -

    So, if we have the matrix $C^K$ for each cell $K$, then we can easily compute the contribution $A^K$ for cell $K$ to the matrix $A$ as follows:

    +

    So, if we have the matrix $C^K$ for each cell $K$, then we can easily compute the contribution $A^K$ for cell $K$ to the matrix $A$ as follows:

    \begin{equation*}
   A^K_{ij} =
     \sum_k \sum_l C_{ik}^K C_{jl}^K
@@ -338,9 +338,9 @@
     \mathbf v_l|_K,
 \end{equation*}

    -

    which is really just the mass matrix on cell $K$ using the Raviart-Thomas basis and weighting by the permeability tensor $\mathbf K$. The derivation here then shows that the weak Galerkin method really just requires us to compute these $C^K$ and $H^K$ matrices on each cell $K$, and then $A^K = C^K H^K (C^K)^T$, which is easily computed. The code to be shown below does exactly this.

    -

    Having so computed the contribution $A^K$ of cell $K$ to the global matrix, all we have to do is to "distribute" these local contributions into the global matrix. How this is done is first shown in step-3 and step-4. In the current program, this will be facilitated by calling AffineConstraints::distribute_local_to_global().

    -

    A linear system of course also needs a right hand side. There is no difficulty associated with computing the right hand side here other than the fact that we only need to use the cell-interior part $\varphi_i^\circ$ for each shape function $\varphi_i$.

    +

    which is really just the mass matrix on cell $K$ using the Raviart-Thomas basis and weighting by the permeability tensor $\mathbf K$. The derivation here then shows that the weak Galerkin method really just requires us to compute these $C^K$ and $H^K$ matrices on each cell $K$, and then $A^K = C^K H^K (C^K)^T$, which is easily computed. The code to be shown below does exactly this.

    +

    Having so computed the contribution $A^K$ of cell $K$ to the global matrix, all we have to do is to "distribute" these local contributions into the global matrix. How this is done is first shown in step-3 and step-4. In the current program, this will be facilitated by calling AffineConstraints::distribute_local_to_global().

    +

    A linear system of course also needs a right hand side. There is no difficulty associated with computing the right hand side here other than the fact that we only need to use the cell-interior part $\varphi_i^\circ$ for each shape function $\varphi_i$.

    Post-processing and L2-errors

    The discussions in the previous sections have given us a linear system that we can solve for the numerical pressure $p_h$. We can use this to compute an approximation to the variable $\mathbf u = -{\mathbf K}\nabla p$ that corresponds to the velocity with which the medium flows in a porous medium if this is the model we are trying to solve. This kind of step – computing a derived quantity from the solution of the discrete problem – is typically called "post-processing".

    Here, instead of using the exact gradient of $p_h$, let us instead use the discrete weak gradient of $p_h$ to calculate the velocity on each element. As discussed above, on each element the gradient of the numerical pressure $\nabla p$ can be approximated by discrete weak gradients $ \nabla_{w,d}\phi_i$:

    @@ -358,7 +358,7 @@ \end{align*}" src="form_6153.png"/>

    where $C^K$ is the expansion matrix from above, and $\mathbf{v}_j$ is the basis function of the $RT$ space on a cell.

    -

    Unfortunately, $\mathbf{K} \mathbf{v}_j$ may not be in the $RT$ space (unless, of course, if $\mathbf K$ is constant times the identity matrix). So, in order to represent it in a finite element program, we need to project it back into a finite dimensional space we can work with. Here, we will use the $L_2$-projection to project it back to the (broken) $RT$ space.

    +

    Unfortunately, $\mathbf{K} \mathbf{v}_j$ may not be in the $RT$ space (unless, of course, if $\mathbf K$ is constant times the identity matrix). So, in order to represent it in a finite element program, we need to project it back into a finite dimensional space we can work with. Here, we will use the $L_2$-projection to project it back to the (broken) $RT$ space.

    We define the projection as $ \mathbf{Q}_h \left( \mathbf{K}\mathbf{v}_j \right) =
 \sum_{k} d_{jk}\mathbf{v}_k$ on each cell $K$. For any $j$, $\left( \mathbf{Q}_h \left( \mathbf{Kv}_j \right),\mathbf{v}_k \right)_K =
 \left( \mathbf{Kv}_j,\mathbf{v}_k \right)_K.$ So, rather than the formula shown above, the numerical velocity on cell $K$ instead becomes

    @@ -399,7 +399,7 @@ \frac{|K|}{|\gamma|} \|\mathbf{u} \cdot \mathbf{n} - \mathbf{u}_h \cdot \mathbf{n}\|_{L_2(\gamma)}^2, \end{align*}" src="form_6166.png"/>

    -

    where $| K |$ is the area of the element, $\gamma$ are faces of the element, $\mathbf{n}$ are unit normal vectors of each face. The last of these norms measures the accuracy of the normal component of the velocity vectors over the interfaces between the cells of the mesh. The scaling factor $|K|/|\gamma|$ is chosen so as to scale out the difference in the length (or area) of the collection of interfaces as the mesh size changes.

    +

    where $| K |$ is the area of the element, $\gamma$ are faces of the element, $\mathbf{n}$ are unit normal vectors of each face. The last of these norms measures the accuracy of the normal component of the velocity vectors over the interfaces between the cells of the mesh. The scaling factor $|K|/|\gamma|$ is chosen so as to scale out the difference in the length (or area) of the collection of interfaces as the mesh size changes.

    The first of these errors above is easily computed using VectorTools::integrate_difference. The others require a bit more work and are implemented in the code below.

    The commented program

    Include files

    @@ -493,7 +493,7 @@
    Vector
    Definition vector.h:109
    triangulation
    const ::parallel::distributed::Triangulation< dim, spacedim > * triangulation
    Definition p4est_wrappers.cc:69

    Right hand side, boundary values, and exact solution

    -

    Next, we define the coefficient matrix $\mathbf{K}$ (here, the identity matrix), Dirichlet boundary conditions, the right-hand side $f = 2\pi^2 \sin(\pi x) \sin(\pi y)$, and the exact solution that corresponds to these choices for $K$ and $f$, namely $p =
+<p>Next, we define the coefficient matrix <picture><source srcset=$\mathbf{K}$ (here, the identity matrix), Dirichlet boundary conditions, the right-hand side $f = 2\pi^2 \sin(\pi x) \sin(\pi y)$, and the exact solution that corresponds to these choices for $K$ and $f$, namely $p =
    \sin(\pi x) \sin(\pi y)$.

      template <int dim>
      class Coefficient : public TensorFunction<2, dim>
    @@ -852,7 +852,7 @@

    cell_matrix_C is then the matrix product between the transpose of $G^K$ and the inverse of the mass matrix (where this inverse is stored in cell_matrix_M):

      cell_matrix_G.Tmmult(cell_matrix_C, cell_matrix_M);
     
    -

    Finally we can compute the local matrix $A^K$. Element $A^K_{ij}$ is given by $\int_{E} \sum_{k,l} C_{ik} C_{jl}
+</div><!-- fragment --><p>Finally we can compute the local matrix <picture><source srcset=$A^K$. Element $A^K_{ij}$ is given by $\int_{E} \sum_{k,l} C_{ik} C_{jl}
    (\mathbf{K} \mathbf{v}_k) \cdot \mathbf{v}_l
    \mathrm{d}x$. We have calculated the coefficients $C$ in the previous step, and so obtain the following after suitably re-arranging the loops:

      local_matrix = 0;
    @@ -1006,7 +1006,7 @@
      }
      }
     
    -

    To compute the matrix $D$ mentioned in the introduction, we then need to evaluate $D=M^{-1}E$ as explained in the introduction:

    +

    To compute the matrix $D$ mentioned in the introduction, we then need to evaluate $D=M^{-1}E$ as explained in the introduction:

      cell_matrix_M.gauss_jordan();
      cell_matrix_M.mmult(cell_matrix_D, cell_matrix_E);
     
    @@ -1303,7 +1303,7 @@
      return 0;
      }

    Results

    -

    We run the program with a right hand side that will produce the solution $p = \sin(\pi x) \sin(\pi y)$ and with homogeneous Dirichlet boundary conditions in the domain $\Omega = (0,1)^2$. In addition, we choose the coefficient matrix in the differential operator $\mathbf{K}$ as the identity matrix. We test this setup using $\mbox{WG}(Q_0,Q_0;RT_{[0]})$, $\mbox{WG}(Q_1,Q_1;RT_{[1]})$ and $\mbox{WG}(Q_2,Q_2;RT_{[2]})$ element combinations, which one can select by using the appropriate constructor argument for the WGDarcyEquation object in main(). We will then visualize pressure values in interiors of cells and on faces. We want to see that the pressure maximum is around 1 and the minimum is around 0. With mesh refinement, the convergence rates of pressure, velocity and flux should then be around 1 for $\mbox{WG}(Q_0,Q_0;RT_{[0]})$ , 2 for $\mbox{WG}(Q_1,Q_1;RT_{[1]})$, and 3 for $\mbox{WG}(Q_2,Q_2;RT_{[2]})$.

    +

    We run the program with a right hand side that will produce the solution $p = \sin(\pi x) \sin(\pi y)$ and with homogeneous Dirichlet boundary conditions in the domain $\Omega = (0,1)^2$. In addition, we choose the coefficient matrix in the differential operator $\mathbf{K}$ as the identity matrix. We test this setup using $\mbox{WG}(Q_0,Q_0;RT_{[0]})$, $\mbox{WG}(Q_1,Q_1;RT_{[1]})$ and $\mbox{WG}(Q_2,Q_2;RT_{[2]})$ element combinations, which one can select by using the appropriate constructor argument for the WGDarcyEquation object in main(). We will then visualize pressure values in interiors of cells and on faces. We want to see that the pressure maximum is around 1 and the minimum is around 0. With mesh refinement, the convergence rates of pressure, velocity and flux should then be around 1 for $\mbox{WG}(Q_0,Q_0;RT_{[0]})$ , 2 for $\mbox{WG}(Q_1,Q_1;RT_{[1]})$, and 3 for $\mbox{WG}(Q_2,Q_2;RT_{[2]})$.

    Test results on WG(Q0,Q0;RT[0])

    The following figures show interior pressures and face pressures using the $\mbox{WG}(Q_0,Q_0;RT_{[0]})$ element. The mesh is refined 2 times (top) and 4 times (bottom), respectively. (This number can be adjusted in the make_grid() function.) When the mesh is coarse, one can see the face pressures $p^\partial$ neatly between the values of the interior pressures $p^\circ$ on the two adjacent cells.

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html 2024-03-17 21:57:48.179263549 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html 2024-03-17 21:57:48.183263574 +0000 @@ -154,10 +154,10 @@
    Note
    As a prerequisite of this program, you need to have HDF5, complex PETSc, and the p4est libraries installed. The installation of deal.II together with these additional libraries is described in the README file.

    Introduction

    A phononic crystal is a periodic nanostructure that modifies the motion of mechanical vibrations or phonons. Phononic structures can be used to disperse, route and confine mechanical vibrations. These structures have potential applications in quantum information and have been used to study macroscopic quantum phenomena. Phononic crystals are usually fabricated in cleanrooms.

    -

    In this tutorial we show how to a design a phononic superlattice cavity which is a particular type of phononic crystal that can be used to confine mechanical vibrations. A phononic superlattice cavity is formed by two Distributed Bragg Reflector, mirrors and a $\lambda/2$ cavity where $\lambda$ is the acoustic wavelength. Acoustic DBRs are periodic structures where a set of bilayer stacks with contrasting physical properties (sound velocity index) is repeated $N$ times. Superlattice cavities are usually grown on a Gallium Arsenide wafer by Molecular Beam Epitaxy. The bilayers correspond to GaAs/AlAs mirror pairs. As shown below, the thickness of the mirror layers (brown and green) is $\lambda/4$ and the thickness of the cavity (blue) is $\lambda/2$.

    +

    In this tutorial we show how to a design a phononic superlattice cavity which is a particular type of phononic crystal that can be used to confine mechanical vibrations. A phononic superlattice cavity is formed by two Distributed Bragg Reflector, mirrors and a $\lambda/2$ cavity where $\lambda$ is the acoustic wavelength. Acoustic DBRs are periodic structures where a set of bilayer stacks with contrasting physical properties (sound velocity index) is repeated $N$ times. Superlattice cavities are usually grown on a Gallium Arsenide wafer by Molecular Beam Epitaxy. The bilayers correspond to GaAs/AlAs mirror pairs. As shown below, the thickness of the mirror layers (brown and green) is $\lambda/4$ and the thickness of the cavity (blue) is $\lambda/2$.

    Phononic superlattice cavity

    In this tutorial we calculate the band gap and the mechanical resonance of a phononic superlattice cavity but the code presented here can be easily used to design and calculate other types of phononic crystals.

    -

    The device is a waveguide in which the wave goes from left to right. The simulations of this tutorial are done in 2D, but the code is dimension independent and can be easily used with 3D simulations. The waveguide width is equal to the $y$ dimension of the domain and the waveguide length is equal to the $x$ dimension of the domain. There are two regimes that depend on the waveguide width:

    @@ -657,7 +657,7 @@
      Assert(component == 0, ExcIndexRange(component, 0, 1));
      (void)component;
     
    -

    Set boundary to 1 if $x=1$, or if $x>0.5$ and $y=-1$.

    +

    Set boundary to 1 if $x=1$, or if $x>0.5$ and $y=-1$.

      if (std::fabs(p[0] - 1) < 1e-8 ||
      (std::fabs(p[1] + 1) < 1e-8 && p[0] >= 0.5))
      {
    @@ -945,7 +945,7 @@
      right_hand_side.value_list(scratch_data.fe_values.get_quadrature_points(),
      rhs_values);
     
    -

    If we are using streamline diffusion we must add its contribution to both the cell matrix and the cell right-hand side. If we are not using streamline diffusion, setting $\delta=0$ negates this contribution below and we are left with the standard, Galerkin finite element assembly.

    +

    If we are using streamline diffusion we must add its contribution to both the cell matrix and the cell right-hand side. If we are not using streamline diffusion, setting $\delta=0$ negates this contribution below and we are left with the standard, Galerkin finite element assembly.

      const double delta = (settings.with_streamline_diffusion ?
      compute_stabilization_delta(cell->diameter(),
      settings.epsilon,
    @@ -1062,7 +1062,7 @@
    level_cell_iterator begin_mg(const unsigned int level=0) const
    Definition index_set.h:67
    IndexSet extract_locally_relevant_level_dofs(const DoFHandler< dim, spacedim > &dof_handler, const unsigned int level)
    -

    If $(i,j)$ is an interface_out dof pair, then $(j,i)$ is an interface_in dof pair. Note: For interface_in, we load the transpose of the interface entries, i.e., the entry for dof pair $(j,i)$ is stored in interface_in(i,j). This is an optimization for the symmetric case which allows only one matrix to be used when setting the edge_matrices in solve(). Here, however, since our problem is non-symmetric, we must store both interface_in and interface_out matrices.

    +

    If $(i,j)$ is an interface_out dof pair, then $(j,i)$ is an interface_in dof pair. Note: For interface_in, we load the transpose of the interface entries, i.e., the entry for dof pair $(j,i)$ is stored in interface_in(i,j). This is an optimization for the symmetric case which allows only one matrix to be used when setting the edge_matrices in solve(). Here, however, since our problem is non-symmetric, we must store both interface_in and interface_out matrices.

      for (unsigned int i = 0; i < copy_data.dofs_per_cell; ++i)
      for (unsigned int j = 0; j < copy_data.dofs_per_cell; ++j)
      if (mg_constrained_dofs.is_interface_matrix_entry(
    @@ -1424,8 +1424,8 @@
      }

    Results

    GMRES Iteration Numbers

    -

    The major advantage for GMG is that it is an $\mathcal{O}(n)$ method, that is, the complexity of the problem increases linearly with the problem size. To show then that the linear solver presented in this tutorial is in fact $\mathcal{O}(n)$, all one needs to do is show that the iteration counts for the GMRES solve stay roughly constant as we refine the mesh.

    -

    Each of the following tables gives the GMRES iteration counts to reduce the initial residual by a factor of $10^8$. We selected a sufficient number of smoothing steps (based on the method) to get iteration numbers independent of mesh size. As can be seen from the tables below, the method is indeed $\mathcal{O}(n)$.

    +

    The major advantage for GMG is that it is an $\mathcal{O}(n)$ method, that is, the complexity of the problem increases linearly with the problem size. To show then that the linear solver presented in this tutorial is in fact $\mathcal{O}(n)$, all one needs to do is show that the iteration counts for the GMRES solve stay roughly constant as we refine the mesh.

    +

    Each of the following tables gives the GMRES iteration counts to reduce the initial residual by a factor of $10^8$. We selected a sufficient number of smoothing steps (based on the method) to get iteration numbers independent of mesh size. As can be seen from the tables below, the method is indeed $\mathcal{O}(n)$.

    DoF/Cell Renumbering

    The point-wise smoothers ("Jacobi" and "SOR") get applied in the order the DoFs are numbered on each level. We can influence this using the DoFRenumbering namespace. The block smoothers are applied based on the ordering we set in setup_smoother(). We can visualize this numbering. The following pictures show the cell numbering of the active cells in downstream, random, and upstream numbering (left to right):

    @@ -1481,7 +1481,7 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html 2024-03-17 21:57:48.355264636 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html 2024-03-17 21:57:48.359264661 +0000 @@ -127,12 +127,12 @@

    While we have tried for the interface of the matrix-free classes for the CPU and the GPU to be as close as possible, there are a few differences. When using the matrix-free framework on a GPU, one must write some CUDA code. However, the amount is fairly small and the use of CUDA is limited to a few keywords.

    The test case

    In this example, we consider the Helmholtz problem

    -\begin{eqnarray*} - \nabla \cdot
-\nabla u + a(\mathbf x) u &=&1,\\ u &=& 0 \quad \text{on } \partial \Omega \end{eqnarray*} +\begin{eqnarray*} - \nabla \cdot
+\nabla u + a(\mathbf x) u &=&1,\\ u &=& 0 \quad \text{on } \partial \Omega \end{eqnarray*}

    -

    where $a(\mathbf x)$ is a variable coefficient.

    -

    We choose as domain $\Omega=[0,1]^3$ and $a(\mathbf x)=\frac{10}{0.05 +
-2\|\mathbf x\|^2}$. Since the coefficient is symmetric around the origin but the domain is not, we will end up with a non-symmetric solution.

    +

    where $a(\mathbf x)$ is a variable coefficient.

    +

    We choose as domain $\Omega=[0,1]^3$ and $a(\mathbf x)=\frac{10}{0.05 +
+2\|\mathbf x\|^2}$. Since the coefficient is symmetric around the origin but the domain is not, we will end up with a non-symmetric solution.

    If you've made it this far into the tutorial, you will know what the weak formulation of this problem looks like and how, in principle, one assembles linear systems for it. Of course, in this program we will in fact not actually form the matrix, but rather only represent its action when one multiplies with it.

    Moving data to and from the device

    GPUs (we will use the term "device" from now on to refer to the GPU) have their own memory that is separate from the memory accessible to the CPU (we will use the term "host" from now on). A normal calculation on the device can be divided in three separate steps:

      @@ -256,8 +256,8 @@
       
       
      constexpr T pow(const T base, const int iexp)
      Definition utilities.h:447
      -

      The following function implements this coefficient. Recall from the introduction that we have defined it as $a(\mathbf
-   x)=\frac{10}{0.05 + 2\|\mathbf x\|^2}$

      +

      The following function implements this coefficient. Recall from the introduction that we have defined it as $a(\mathbf
+   x)=\frac{10}{0.05 + 2\|\mathbf x\|^2}$

        template <int dim, int fe_degree>
        VaryingCoefficientFunctor<dim, fe_degree>::operator()(
      @@ -312,9 +312,9 @@
       

      The Helmholtz problem we want to solve here reads in weak form as follows:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
    (\nabla v, \nabla u)+ (v, a(\mathbf x) u) &=&(v,1) \quad \forall v.
-   \end{eqnarray*} + \end{eqnarray*}" src="form_6273.png"/>

      If you have seen step-37, then it will be obvious that the two terms on the left-hand side correspond to the two function calls here:

        template <int dim, int fe_degree>
      @@ -656,7 +656,7 @@

      The output results function is as usual since we have already copied the values back from the GPU to the CPU.

      -

      While we're already doing something with the function, we might as well compute the $L_2$ norm of the solution. We do this by calling VectorTools::integrate_difference(). That function is meant to compute the error by evaluating the difference between the numerical solution (given by a vector of values for the degrees of freedom) and an object representing the exact solution. But we can easily compute the $L_2$ norm of the solution by passing in a zero function instead. That is, instead of evaluating the error $\|u_h-u\|_{L_2(\Omega)}$, we are just evaluating $\|u_h-0\|_{L_2(\Omega)}=\|u_h\|_{L_2(\Omega)}$ instead.

      +

      While we're already doing something with the function, we might as well compute the $L_2$ norm of the solution. We do this by calling VectorTools::integrate_difference(). That function is meant to compute the error by evaluating the difference between the numerical solution (given by a vector of values for the degrees of freedom) and an object representing the exact solution. But we can easily compute the $L_2$ norm of the solution by passing in a zero function instead. That is, instead of evaluating the error $\|u_h-u\|_{L_2(\Omega)}$, we are just evaluating $\|u_h-0\|_{L_2(\Omega)}=\|u_h\|_{L_2(\Omega)}$ instead.

        template <int dim, int fe_degree>
        void HelmholtzProblem<dim, fe_degree>::output_results(
        const unsigned int cycle) const
      @@ -791,7 +791,7 @@
      Number of degrees of freedom: 117649
      Solved in 227 iterations.
      solution norm: 0.0205261
      -

      One can make two observations here: First, the norm of the numerical solution converges, presumably to the norm of the exact (but unknown) solution. And second, the number of iterations roughly doubles with each refinement of the mesh. (This is in keeping with the expectation that the number of CG iterations grows with the square root of the condition number of the matrix; and that we know that the condition number of the matrix of a second-order differential operation grows like ${\cal O}(h^{-2})$.) This is of course rather inefficient, as an optimal solver would have a number of iterations that is independent of the size of the problem. But having such a solver would require using a better preconditioner than the identity matrix we have used here.

      +

      One can make two observations here: First, the norm of the numerical solution converges, presumably to the norm of the exact (but unknown) solution. And second, the number of iterations roughly doubles with each refinement of the mesh. (This is in keeping with the expectation that the number of CG iterations grows with the square root of the condition number of the matrix; and that we know that the condition number of the matrix of a second-order differential operation grows like ${\cal O}(h^{-2})$.) This is of course rather inefficient, as an optimal solver would have a number of iterations that is independent of the size of the problem. But having such a solver would require using a better preconditioner than the identity matrix we have used here.

      Possibilities for extensions

      Currently, this program uses no preconditioner at all. This is mainly since constructing an efficient matrix-free preconditioner is non-trivial. However, simple choices just requiring the diagonal of the corresponding matrix are good candidates and these can be computed in a matrix-free way as well. Alternatively, and maybe even better, one could extend the tutorial to use multigrid with Chebyshev smoothers similar to step-37.

      The plain program

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html 2024-03-17 21:57:48.419265031 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html 2024-03-17 21:57:48.419265031 +0000 @@ -192,14 +192,14 @@ (1-\xi)\eta (x_2,y_2) + \xi\eta (x_3,y_3). \end{align*}" src="form_6280.png"/>

      -

      For the case of the curved surface, we want to modify this formula. For the top cell of the coarse mesh of the disk, we can assume that the points $(x_0,y_0)$ and $(x_1,y_1)$ sit along the straight line at the lower end and the points $(x_2,y_2)$ and $(x_3,y_3)$ are connected by a quarter circle along the top. We would then map a point $(\xi, \eta)$ as

      +

      For the case of the curved surface, we want to modify this formula. For the top cell of the coarse mesh of the disk, we can assume that the points $(x_0,y_0)$ and $(x_1,y_1)$ sit along the straight line at the lower end and the points $(x_2,y_2)$ and $(x_3,y_3)$ are connected by a quarter circle along the top. We would then map a point $(\xi, \eta)$ as

      \begin{align*}
 (x,y) = (1-\eta) \big[(1-\xi) (x_0,y_0) + \xi (x_1,y_1)\big] +
       \eta \mathbf{c}_3(\xi),
 \end{align*}

      where $\mathbf{c}_3(\xi)$ is a curve that describes the $(x,y)$ coordinates of the quarter circle in terms of an arclength parameter $\xi\in (0,1)$. This represents a linear interpolation between the straight lower edge and the curved upper edge of the cell, and is the basis for the picture shown above.

      -

      This formula is easily generalized to the case where all four edges are described by a curve rather than a straight line. We call the four functions, parameterized by a single coordinate $\xi$ or $\eta$ in the horizontal and vertical directions, $\mathbf{c}_0, \mathbf{c}_1, \mathbf{c}_2,
+<p>This formula is easily generalized to the case where all four edges are described by a curve rather than a straight line. We call the four functions, parameterized by a single coordinate <picture><source srcset=$\xi$ or $\eta$ in the horizontal and vertical directions, $\mathbf{c}_0, \mathbf{c}_1, \mathbf{c}_2,
 \mathbf{c}_3$ for the left, right, lower, and upper edge of a quadrilateral, respectively. The interpolation then reads

      \begin{align*}
 (x,y) =& (1-\xi)\mathbf{c}_0(\eta) + \xi \mathbf{c}_1(\eta)
@@ -216,7 +216,7 @@
 <p>As a final remark on transfinite interpolation, we mention that the mesh refinement strategies in deal.II in absence of a volume manifold description are also based on the weights of the transfinite interpolation and optimal in that sense. The difference is that the default algorithm sees only one cell at a time, and so will apply the optimal algorithm only on those cells touching the curved manifolds. In contrast, using the transfinite mapping on entire <em>patches</em> of cells (originating from one coarser cell) allows to use the transfinite interpolation method in a way that propagates information from the boundary to cells far away.</p>
 <p><a class=

      Transfinite interpolation is expensive and how to deal with it

      A mesh with a transfinite manifold description is typically set up in two steps. The first step is to create a coarse mesh (or read it in from a file) and to attach a curved manifold to some of the mesh entities. For the above example of the disk, we attach a polar manifold to the faces along the outer circle (this is done automatically by GridGenerator::hyper_ball()). Before we start refining the mesh, we then assign a TransfiniteInterpolationManifold to all interior cells and edges of the mesh, which of course needs to be based on some manifold id that we have assigned to those entities (everything except the circle on the boundary). It does not matter whether we also assign a TransfiniteInterpolationManifold to the inner square of the disk or not because the transfinite interpolation on a coarse cell with straight edges (or flat faces in 3d) simply yields subdivided children with straight edges (flat faces).

      -

      Later, when the mesh is refined or when a higher-order mapping is set up based on this mesh, the cells will query the underlying manifold object for new points. This process takes a set of surrounding points, for example the four vertices of a two-dimensional cell, and a set of weights to each of these points, to define a new point. For the mid point of a cell, each of the four vertices would get weight 0.25. For the transfinite interpolation manifold, the process of building weighted sums requires some serious work. By construction, we want to combine the points in terms of the reference coordinates $\xi$ and $\eta$ (or $\xi, \eta, \zeta$ in 3D) of the surrounding points. However, the interface of the manifold classes in deal.II does not get the reference coordinates of the surrounding points (as they are not stored globally) but rather the physical coordinates only. Thus, the first step the transfinite interpolation manifold has to do is to invert the mapping and find the reference coordinates within one of the coarse cells of the transfinite interpolation (e.g. one of the four shaded coarse-grid cells of the disk mesh above). This inversion is done by a Newton iteration (or rather, finite-difference based Newton scheme combined with Broyden's method) and queries the transfinite interpolation according to the formula above several times. Each of these queries in turn might call an expensive manifold, e.g. a spherical description of a ball, and be expensive on its own. Since the Manifold interface class of deal.II only provides a set of points, the transfinite interpolation initially does not even know to which coarse grid cell the set of surrounding points belong to and needs to search among several cells based on some heuristics. 
In terms of charts, one could describe the implementation of the transfinite interpolation as an atlas-based implementation: Each cell of the initial coarse grid of the triangulation represents a chart with its own reference space, and the surrounding manifolds provide a way to transform from the chart space (i.e., the reference cell) to the physical space. The collection of the charts of the coarse grid cells is an atlas, and as usual, the first thing one does when looking up something in an atlas is to find the right chart.

      +

      Later, when the mesh is refined or when a higher-order mapping is set up based on this mesh, the cells will query the underlying manifold object for new points. This process takes a set of surrounding points, for example the four vertices of a two-dimensional cell, and a set of weights to each of these points, to define a new point. For the mid point of a cell, each of the four vertices would get weight 0.25. For the transfinite interpolation manifold, the process of building weighted sums requires some serious work. By construction, we want to combine the points in terms of the reference coordinates $\xi$ and $\eta$ (or $\xi, \eta, \zeta$ in 3D) of the surrounding points. However, the interface of the manifold classes in deal.II does not get the reference coordinates of the surrounding points (as they are not stored globally) but rather the physical coordinates only. Thus, the first step the transfinite interpolation manifold has to do is to invert the mapping and find the reference coordinates within one of the coarse cells of the transfinite interpolation (e.g. one of the four shaded coarse-grid cells of the disk mesh above). This inversion is done by a Newton iteration (or rather, finite-difference based Newton scheme combined with Broyden's method) and queries the transfinite interpolation according to the formula above several times. Each of these queries in turn might call an expensive manifold, e.g. a spherical description of a ball, and be expensive on its own. Since the Manifold interface class of deal.II only provides a set of points, the transfinite interpolation initially does not even know to which coarse grid cell the set of surrounding points belong to and needs to search among several cells based on some heuristics. 
In terms of charts, one could describe the implementation of the transfinite interpolation as an atlas-based implementation: Each cell of the initial coarse grid of the triangulation represents a chart with its own reference space, and the surrounding manifolds provide a way to transform from the chart space (i.e., the reference cell) to the physical space. The collection of the charts of the coarse grid cells is an atlas, and as usual, the first thing one does when looking up something in an atlas is to find the right chart.

      Once the reference coordinates of the surrounding points have been found, a new point in the reference coordinate system is computed by a simple weighted sum. Finally, the reference point is inserted into the formula for the transfinite interpolation, which gives the desired new point.

      In a number of cases, the curved manifold is not only used during mesh refinement, but also to ensure a curved representation of boundaries within the cells of the computational domain. This is a necessity to guarantee high-order convergence for high-order polynomials on complex geometries anyway, but sometimes an accurate geometry is also desired with linear shape functions. This is often done by polynomial descriptions of the cells and called the isoparametric concept if the polynomial degree to represent the curved mesh elements is the same as the degree of the polynomials for the numerical solution. If the degree of the geometry is higher or lower than the solution, one calls that a super- or sub-parametric geometry representation, respectively. In deal.II, the standard class for polynomial representation is MappingQ. If, for example, this class is used with polynomial degree $4$ in 3D, a total of 125 (i.e., $(4+1)^3$) points are needed for the interpolation. Among these points, 8 are the cell's vertices and already available from the mesh, but the other 117 need to be provided by the manifold. In case the transfinite interpolation manifold is used, we can imagine that going through the pull-back into reference coordinates of some yet to be determined coarse cell, followed by subsequent push-forward on each of the 117 points, is a lot of work and can be very time consuming.

      What makes things worse is that the structure of many programs is such that the mapping is queried several times independently for the same cell. Its primary use is in the assembly of the linear system, i.e., the computation of the system matrix and the right hand side, via the mapping argument of the FEValues object. However, also the interpolation of boundary values, the computation of numerical errors, writing the output, and evaluation of error estimators must involve the same mapping to ensure a consistent interpretation of the solution vectors. Thus, even a linear stationary problem that is solved once will evaluate the points of the mapping several times. For the cubic case in 3D mentioned above, this means computing 117 points per cell by an expensive algorithm many times. The situation is more pressing for nonlinear or time-dependent problems where those operations are done over and over again.

      @@ -468,7 +468,7 @@ \sum_{k=1}^d\text{det}(J) w_q a(x)\frac{\partial \varphi_i(\boldsymbol \xi_q)}{\partial x_k} \frac{\partial \varphi_j(\boldsymbol \xi_q)}{\partial x_k}$" src="form_6307.png"/>, which is exactly the terms needed for the bilinear form of the Laplace equation.

      -

      The reason for choosing this somewhat unusual scheme is due to the heavy work involved in computing the cell matrix for a relatively high polynomial degree in 3d. As we want to highlight the cost of the mapping in this tutorial program, we better do the assembly in an optimized way in order to not chase bottlenecks that have been solved by the community already. Matrix-matrix multiplication is one of the best optimized kernels in the HPC context, and the FullMatrix::mTmult() function will call into those optimized BLAS functions. If the user has provided a good BLAS library when configuring deal.II (like OpenBLAS or Intel's MKL), the computation of the cell matrix will execute close to the processor's peak arithmetic performance. As a side note, we mention that despite an optimized matrix-matrix multiplication, the current strategy is sub-optimal in terms of complexity as the work to be done is proportional to $(p+1)^9$ operations for degree $p$ (this also applies to the usual evaluation with FEValues). One could compute the cell matrix with $\mathcal O((p+1)^7)$ operations by utilizing the tensor product structure of the shape functions, as is done by the matrix-free framework in deal.II. We refer to step-37 and the documentation of the tensor-product-aware evaluators FEEvaluation for details on how an even more efficient cell matrix computation could be realized.

      +

      The reason for choosing this somewhat unusual scheme is due to the heavy work involved in computing the cell matrix for a relatively high polynomial degree in 3d. As we want to highlight the cost of the mapping in this tutorial program, we better do the assembly in an optimized way in order to not chase bottlenecks that have been solved by the community already. Matrix-matrix multiplication is one of the best optimized kernels in the HPC context, and the FullMatrix::mTmult() function will call into those optimized BLAS functions. If the user has provided a good BLAS library when configuring deal.II (like OpenBLAS or Intel's MKL), the computation of the cell matrix will execute close to the processor's peak arithmetic performance. As a side note, we mention that despite an optimized matrix-matrix multiplication, the current strategy is sub-optimal in terms of complexity as the work to be done is proportional to $(p+1)^9$ operations for degree $p$ (this also applies to the usual evaluation with FEValues). One could compute the cell matrix with $\mathcal O((p+1)^7)$ operations by utilizing the tensor product structure of the shape functions, as is done by the matrix-free framework in deal.II. We refer to step-37 and the documentation of the tensor-product-aware evaluators FEEvaluation for details on how an even more efficient cell matrix computation could be realized.

        template <int dim>
        void PoissonProblem<dim>::assemble_system(const Mapping<dim> &mapping)
        {
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html 2024-03-17 21:57:48.503265550 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html 2024-03-17 21:57:48.511265600 +0000 @@ -159,7 +159,7 @@

      This problem is also called the Gelfand problem and is a typical example for problems from combustion theory, see for example [bebernes1989mathematical].

      Discretization with finite elements

      -

      As usual, we first derive the weak formulation for this problem by multiplying with a smooth test function $v\colon\Omega\to\mathbb{R}$ respecting the boundary condition and integrating over the domain $\Omega$. Integration by parts and putting the term from the right hand side to the left yields the weak formulation: Find a function $u\colon\Omega\to\mathbb{R}$ such that for all test functions $v$ it holds:

      +

      As usual, we first derive the weak formulation for this problem by multiplying with a smooth test function $v\colon\Omega\to\mathbb{R}$ respecting the boundary condition and integrating over the domain $\Omega$. Integration by parts and putting the term from the right hand side to the left yields the weak formulation: Find a function $u\colon\Omega\to\mathbb{R}$ such that for all test functions $v$ it holds:

      \begin{align*}
  \int_\Omega \nabla v \cdot \nabla u \,\mathrm{d}x
  -
@@ -186,8 +186,8 @@
  &\text{Update: }                       & u_h^{n+1} &= u_h^n + s_h^n.
 \end{align*}

      -

      So in each Newton step we have to solve a linear problem $A\,x = b$, where the system matrix $A$ is represented by the Jacobian $F'(u_h^n)[\,\cdot\,]\colon\mathbb{R}^N\to\mathbb{R}^N$ and the right hand side $b$ by the negative residual $-F(u_h^n)$. The solution vector $x$ is in that case the Newton update of the $n$-th Newton step. Note that we assume an initial guess $u_h^0$, which already fulfills the Dirichlet boundary conditions of the problem formulation (in fact this could also be an inhomogeneous Dirichlet boundary condition) and thus the Newton updates $s_h$ satisfy a homogeneous Dirichlet condition.

      -

      Until now we only tested with the basis functions, however, we can also represent any function of $V_h$ as linear combination of basis functions. More mathematically this means, that every element of $V_h$ can be identified with a vector $U\in\mathbb{R}^N$ via the representation formula: $u_h = \sum_{i=1}^N U_i \varphi_i$. So using this we can give an expression for the discrete Jacobian and the residual:

      +

      So in each Newton step we have to solve a linear problem $A\,x = b$, where the system matrix $A$ is represented by the Jacobian $F'(u_h^n)[\,\cdot\,]\colon\mathbb{R}^N\to\mathbb{R}^N$ and the right hand side $b$ by the negative residual $-F(u_h^n)$. The solution vector $x$ is in that case the Newton update of the $n$-th Newton step. Note that we assume an initial guess $u_h^0$, which already fulfills the Dirichlet boundary conditions of the problem formulation (in fact this could also be an inhomogeneous Dirichlet boundary condition) and thus the Newton updates $s_h$ satisfy a homogeneous Dirichlet condition.

      +

      Until now we only tested with the basis functions, however, we can also represent any function of $V_h$ as linear combination of basis functions. More mathematically this means, that every element of $V_h$ can be identified with a vector $U\in\mathbb{R}^N$ via the representation formula: $u_h = \sum_{i=1}^N U_i \varphi_i$. So using this we can give an expression for the discrete Jacobian and the residual:

      \begin{align*}
  A_{ij} = \bigl( F'(u_h^n) \bigr)_{ij}
  &=
@@ -837,7 +837,7 @@
 <div class=

      TasksParallelScheme tasks_parallel_scheme

      GelfandProblem::evaluate_residual

      -

      Next we implement a function which evaluates the nonlinear discrete residual for a given input vector ( $\texttt{dst} = F(\texttt{src})$). This function is then used for the assembly of the right hand side of the linearized system and later for the computation of the residual of the next Newton step to check if we already reached the error tolerance. As this function should not affect any class variable we define it as a constant function. Internally we exploit the fast finite element evaluation through the FEEvaluation class and the MatrixFree::cell_loop(), similar to apply_add() function of the JacobianOperator.

      +

      Next we implement a function which evaluates the nonlinear discrete residual for a given input vector ( $\texttt{dst} = F(\texttt{src})$). This function is then used for the assembly of the right hand side of the linearized system and later for the computation of the residual of the next Newton step to check if we already reached the error tolerance. As this function should not affect any class variable we define it as a constant function. Internally we exploit the fast finite element evaluation through the FEEvaluation class and the MatrixFree::cell_loop(), similar to apply_add() function of the JacobianOperator.

      First we create a pointer to the MatrixFree object, which is stored in the system_matrix. Then we pass the worker function local_evaluate_residual() for the cell wise evaluation of the residual together with the input and output vector to the MatrixFree::cell_loop(). In addition, we enable the zero out of the output vector in the loop, which is more efficient than calling dst = 0.0 separately before.

      Note that with this approach we do not have to take care about the MPI related data exchange, since all the bookkeeping is done by the MatrixFree::cell_loop().

        template <int dim, int fe_degree>
      @@ -901,7 +901,7 @@
       
       

      GelfandProblem::compute_residual

      -

      According to step-15 the following function computes the norm of the nonlinear residual for the solution $u_h^n + \alpha s_h^n$ with the help of the evaluate_residual() function. The Newton step length $\alpha$ becomes important if we would use an adaptive version of the Newton method. Then for example we would compute the residual for different step lengths and compare the residuals. However, for our problem the full Newton step with $\alpha=1$ is the best we can do. An adaptive version of Newton's method becomes interesting if we have no good initial value. Note that in theory Newton's method converges with quadratic order, but only if we have an appropriate initial value. For unsuitable initial values the Newton method diverges even with quadratic order. A common way is then to use a damped version $\alpha<1$ until the Newton step is good enough and the full Newton step can be performed. This was also discussed in step-15.

      +

      According to step-15 the following function computes the norm of the nonlinear residual for the solution $u_h^n + \alpha s_h^n$ with the help of the evaluate_residual() function. The Newton step length $\alpha$ becomes important if we would use an adaptive version of the Newton method. Then for example we would compute the residual for different step lengths and compare the residuals. However, for our problem the full Newton step with $\alpha=1$ is the best we can do. An adaptive version of Newton's method becomes interesting if we have no good initial value. Note that in theory Newton's method converges with quadratic order, but only if we have an appropriate initial value. For unsuitable initial values the Newton method diverges even with quadratic order. A common way is then to use a damped version $\alpha<1$ until the Newton step is good enough and the full Newton step can be performed. This was also discussed in step-15.

        template <int dim, int fe_degree>
        double GelfandProblem<dim, fe_degree>::compute_residual(const double alpha)
        {
      @@ -1044,9 +1044,9 @@
        TimerOutput::Scope t(computing_timer, "solve");
       
       
      -

      We define a maximal number of Newton steps and tolerances for the convergence criterion. Usually, with good starting values, the Newton method converges in three to six steps, so maximal ten steps should be totally sufficient. As tolerances we use $\|F(u^n_h)\|<\text{TOL}_f =
-   10^{-12}$ for the norm of the residual and $\|s_h^n\| < \text{TOL}_x =
-   10^{-10}$ for the norm of the Newton update. This seems a bit over the top, but we will see that, for our example, we will achieve these tolerances after a few steps.

      +

      We define a maximal number of Newton steps and tolerances for the convergence criterion. Usually, with good starting values, the Newton method converges in three to six steps, so maximal ten steps should be totally sufficient. As tolerances we use $\|F(u^n_h)\|<\text{TOL}_f =
+   10^{-12}$ for the norm of the residual and $\|s_h^n\| < \text{TOL}_x =
+   10^{-10}$ for the norm of the Newton update. This seems a bit over the top, but we will see that, for our example, we will achieve these tolerances after a few steps.

        const unsigned int itmax = 10;
        const double TOLf = 1e-12;
        const double TOLx = 1e-10;
      @@ -1066,7 +1066,7 @@
        compute_update();
       
       
      -

      Then we compute the errors, namely the norm of the Newton update and the residual. Note that at this point one could incorporate a step size control for the Newton method by varying the input parameter $\alpha$ for the compute_residual function. However, here we just use $\alpha$ equal to one for a plain Newton iteration.

      +

      Then we compute the errors, namely the norm of the Newton update and the residual. Note that at this point one could incorporate a step size control for the Newton method by varying the input parameter $\alpha$ for the compute_residual function. However, here we just use $\alpha$ equal to one for a plain Newton iteration.

        const double ERRx = newton_update.l2_norm();
        const double ERRf = compute_residual(1.0);
       
      @@ -1451,17 +1451,17 @@
      +---------------------------------+-----------+------------+------------+

      We show the solution for the two- and three-dimensional problem in the following figure.

      Solution of the two-dimensional Gelfand problem.
      Solution of the three-dimensional Gelfand problem.

      Newton solver

      -

      In the program output above we find some interesting information about the Newton iterations. The terminal output in each refinement cycle presents detailed diagnostics of the Newton method, which show first of all the number of Newton steps and for each step the norm of the residual $\|F(u_h^{n+1})\|$, the norm of the Newton update $\|s_h^n\|$, and the number of CG iterations it.

      -

      We observe that for all cases the Newton method converges in approximately three to four steps, which shows the quadratic convergence of the Newton method with a full step length $\alpha = 1$. However, be aware that for a badly chosen initial guess $u_h^0$, the Newton method will also diverge quadratically. Usually if you do not have an appropriate initial guess, you try a few damped Newton steps with a reduced step length $\alpha < 1$ until the Newton step is again in the quadratic convergence domain. This damping and relaxation of the Newton step length truly requires a more sophisticated implementation of the Newton method, which we designate to you as a possible extension of the tutorial.

      +

      In the program output above we find some interesting information about the Newton iterations. The terminal output in each refinement cycle presents detailed diagnostics of the Newton method, which show first of all the number of Newton steps and for each step the norm of the residual $\|F(u_h^{n+1})\|$, the norm of the Newton update $\|s_h^n\|$, and the number of CG iterations it.

      +

      We observe that for all cases the Newton method converges in approximately three to four steps, which shows the quadratic convergence of the Newton method with a full step length $\alpha = 1$. However, be aware that for a badly chosen initial guess $u_h^0$, the Newton method will also diverge quadratically. Usually if you do not have an appropriate initial guess, you try a few damped Newton steps with a reduced step length $\alpha < 1$ until the Newton step is again in the quadratic convergence domain. This damping and relaxation of the Newton step length truly requires a more sophisticated implementation of the Newton method, which we designate to you as a possible extension of the tutorial.

      Furthermore, we see that the number of CG iterations is approximately constant with successive mesh refinements and an increasing number of DoFs. This is of course due to the geometric multigrid preconditioner and similar to the observations made in other tutorials that use this method, e.g., step-16 and step-37. Just to give an example, in the three-dimensional case after five refinements, we have approximately 14.7 million distributed DoFs with fourth-order Lagrangian finite elements, but the number of CG iterations is still less than ten.

      In addition, there is one more very useful optimization that we applied and that should be mentioned here. In the compute_update() function we explicitly reset the vector holding the Newton update before passing it as the output vector to the solver. In that case we use a starting value of zero for the CG method, which is more suitable than the previous Newton update, the actual content of the newton_update before resetting, and thus reduces the number of CG iterations by a few steps.

      Possibilities for extensions

      A couple of possible extensions are available concerning minor updates to the present code as well as a deeper numerical investigation of the Gelfand problem.

      More sophisticated Newton iteration

      Beside a step size controlled version of the Newton iteration as mentioned already in step-15 (and actually implemented, with many more bells and whistles, in step-77), one could also implement a more flexible stopping criterion for the Newton iteration. For example one could replace the fixed tolerances for the residual TOLf and for the Newton updated TOLx and implement a mixed error control with a given absolute and relative tolerance, such that the Newton iteration exits with success as, e.g.,

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   \|F(u_h^{n+1})\| \leq \texttt{RelTol} \|u_h^{n+1}\| + \texttt{AbsTol}.
-\end{align*} +\end{align*}" src="form_6344.png"/>

      For more advanced applications with many nonlinear systems to solve, for example at each time step for a time-dependent problem, it turns out that it is not necessary to set up and assemble the Jacobian anew at every single Newton step or even for each time step. Instead, the existing Jacobian from a previous step can be used for the Newton iteration. The Jacobian is then only rebuilt if, for example, the Newton iteration converges too slowly. Such an idea yields a quasi-Newton method. Admittedly, when using the matrix-free framework, the assembly of the Jacobian is omitted anyway, but with in this way one can try to optimize the reassembly of the geometric multigrid preconditioner. Remember that each time the solution from the old Newton step must be distributed to all levels and the mutligrid preconditioner must be reinitialized.

      Parallel scalability and thread parallelism

      @@ -1469,9 +1469,9 @@

      Comparison to matrix-based methods

      Analogously to step-50 and the mentioned possible extension of step-75, you can convince yourself which method is faster.

      Eigenvalue problem

      -

      One can consider the corresponding eigenvalue problem, which is called Bratu problem. For example, if we define a fixed eigenvalue $\lambda\in[0,6]$, we can compute the corresponding discrete eigenfunction. You will notice that the number of Newton steps will increase with increasing $\lambda$. To reduce the number of Newton steps you can use the following trick: start from a certain $\lambda$, compute the eigenfunction, increase $\lambda=\lambda +
-\delta_\lambda$, and then use the previous solution as an initial guess for the Newton iteration – this approach is called a "continuation -method". In the end you can plot the $H^1(\Omega)$-norm over the eigenvalue $\lambda \mapsto \|u_h\|_{H^1(\Omega)}$. What do you observe for further increasing $\lambda>7$?

      +

      One can consider the corresponding eigenvalue problem, which is called Bratu problem. For example, if we define a fixed eigenvalue $\lambda\in[0,6]$, we can compute the corresponding discrete eigenfunction. You will notice that the number of Newton steps will increase with increasing $\lambda$. To reduce the number of Newton steps you can use the following trick: start from a certain $\lambda$, compute the eigenfunction, increase $\lambda=\lambda +
+\delta_\lambda$, and then use the previous solution as an initial guess for the Newton iteration – this approach is called a "continuation +method". In the end you can plot the $H^1(\Omega)$-norm over the eigenvalue $\lambda \mapsto \|u_h\|_{H^1(\Omega)}$. What do you observe for further increasing $\lambda>7$?

      The plain program

      /* ---------------------------------------------------------------------
      *
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html 2024-03-17 21:57:48.643266414 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html 2024-03-17 21:57:48.643266414 +0000 @@ -144,15 +144,15 @@

      This tutorial program solves the Euler equations of fluid dynamics using an explicit time integrator with the matrix-free framework applied to a high-order discontinuous Galerkin discretization in space. For details about the Euler system and an alternative implicit approach, we also refer to the step-33 tutorial program. You might also want to look at step-69 for an alternative approach to solving these equations.

      The Euler equations

      The Euler equations are a conservation law, describing the motion of a compressible inviscid gas,

      -\[
+<picture><source srcset=\[
 \frac{\partial \mathbf{w}}{\partial t} + \nabla \cdot \mathbf{F}(\mathbf{w}) =
 \mathbf{G}(\mathbf w),
-\] +\]" src="form_6349.png"/>

      -

      where the $d+2$ components of the solution vector are $\mathbf{w}=(\rho, \rho
-u_1,\ldots,\rho u_d,E)^{\mathrm T}$. Here, $\rho$ denotes the fluid density, ${\mathbf u}=(u_1,\ldots, u_d)^\mathrm T$ the fluid velocity, and $E$ the energy density of the gas. The velocity is not directly solved for, but rather the variable $\rho \mathbf{u}$, the linear momentum (since this is the conserved quantity).

      -

      The Euler flux function, a $(d+2)\times d$ matrix, is defined as

      -\[
+<p> where the <picture><source srcset=$d+2$ components of the solution vector are $\mathbf{w}=(\rho, \rho
+u_1,\ldots,\rho u_d,E)^{\mathrm T}$. Here, $\rho$ denotes the fluid density, ${\mathbf u}=(u_1,\ldots, u_d)^\mathrm T$ the fluid velocity, and $E$ the energy density of the gas. The velocity is not directly solved for, but rather the variable $\rho \mathbf{u}$, the linear momentum (since this is the conserved quantity).

      +

      The Euler flux function, a $(d+2)\times d$ matrix, is defined as

      +\[
   \mathbf F(\mathbf w)
   =
   \begin{pmatrix}
@@ -160,10 +160,10 @@
   \rho \mathbf{u} \otimes \mathbf{u} + \mathbb{I}p\\
   (E+p)\mathbf{u}
   \end{pmatrix}
-\] +\]" src="form_6355.png"/>

      -

      with $\mathbb{I}$ the $d\times d$ identity matrix and $\otimes$ the outer product; its components denote the mass, momentum, and energy fluxes, respectively. The right hand side forcing is given by

      -\[
+<p> with <picture><source srcset=$\mathbb{I}$ the $d\times d$ identity matrix and $\otimes$ the outer product; its components denote the mass, momentum, and energy fluxes, respectively. The right hand side forcing is given by

      +\[
   \mathbf G(\mathbf w)
   =
   \begin{pmatrix}
@@ -171,53 +171,53 @@
   \rho\mathbf{g}\\
   \rho \mathbf{u} \cdot \mathbf{g}
   \end{pmatrix},
-\] +\]" src="form_6357.png"/>

      -

      where the vector $\mathbf g$ denotes the direction and magnitude of gravity. It could, however, also denote any other external force per unit mass that is acting on the fluid. (Think, for example, of the electrostatic forces exerted by an external electric field on charged particles.)

      -

      The three blocks of equations, the second involving $d$ components, describe the conservation of mass, momentum, and energy. The pressure is not a solution variable but needs to be expressed through a "closure relationship" by the other variables; we here choose the relationship appropriate for a gas with molecules composed of two atoms, which at moderate temperatures is given by $p=(\gamma - 1) \left(E-\frac 12 \rho
-\mathbf{u}\cdot \mathbf{u}\right)$ with the constant $\gamma = 1.4$.

      +

      where the vector $\mathbf g$ denotes the direction and magnitude of gravity. It could, however, also denote any other external force per unit mass that is acting on the fluid. (Think, for example, of the electrostatic forces exerted by an external electric field on charged particles.)

      +

      The three blocks of equations, the second involving $d$ components, describe the conservation of mass, momentum, and energy. The pressure is not a solution variable but needs to be expressed through a "closure relationship" by the other variables; we here choose the relationship appropriate for a gas with molecules composed of two atoms, which at moderate temperatures is given by $p=(\gamma - 1) \left(E-\frac 12 \rho
+\mathbf{u}\cdot \mathbf{u}\right)$ with the constant $\gamma = 1.4$.

      High-order discontinuous Galerkin discretization

      For spatial discretization, we use a high-order discontinuous Galerkin (DG) discretization, using a solution expansion of the form

      -\[
+<picture><source srcset=\[
 \mathbf{w}_h(\mathbf{x}, t) =
 \sum_{j=1}^{n_\mathbf{dofs}} \boldsymbol{\varphi}_j(\mathbf{x}) {w}_j(t).
-\] +\]" src="form_6361.png"/>

      -

      Here, $\boldsymbol{\varphi}_j$ denotes the $j$th basis function, written in vector form with separate shape functions for the different components and letting $w_j(t)$ go through the density, momentum, and energy variables, respectively. In this form, the space dependence is contained in the shape functions and the time dependence in the unknown coefficients $w_j$. As opposed to the continuous finite element method where some shape functions span across element boundaries, the shape functions are local to a single element in DG methods, with a discontinuity from one element to the next. The connection of the solution from one cell to its neighbors is instead imposed by the numerical fluxes specified below. This allows for some additional flexibility, for example to introduce directionality in the numerical method by, e.g., upwinding.

      +

      Here, $\boldsymbol{\varphi}_j$ denotes the $j$th basis function, written in vector form with separate shape functions for the different components and letting $w_j(t)$ go through the density, momentum, and energy variables, respectively. In this form, the space dependence is contained in the shape functions and the time dependence in the unknown coefficients $w_j$. As opposed to the continuous finite element method where some shape functions span across element boundaries, the shape functions are local to a single element in DG methods, with a discontinuity from one element to the next. The connection of the solution from one cell to its neighbors is instead imposed by the numerical fluxes specified below. This allows for some additional flexibility, for example to introduce directionality in the numerical method by, e.g., upwinding.

      DG methods are popular methods for solving problems of transport character because they combine low dispersion errors with controllable dissipation on barely resolved scales. This makes them particularly attractive for simulation in the field of fluid dynamics where a wide range of active scales needs to be represented and inadequately resolved features are prone to disturb the important well-resolved features. Furthermore, high-order DG methods are well-suited for modern hardware with the right implementation. At the same time, DG methods are no silver bullet. In particular when the solution develops discontinuities (shocks), as is typical for the Euler equations in some flow regimes, high-order DG methods tend to oscillatory solutions, like all high-order methods when not using flux- or slope-limiters. This is a consequence of Godunov's theorem that states that any total variation limited (TVD) scheme that is linear (like a basic DG discretization) can at most be first-order accurate. Put differently, since DG methods aim for higher order accuracy, they cannot be TVD on solutions that develop shocks. Even though some communities claim that the numerical flux in DG methods can control dissipation, this is of limited value unless all shocks in a problem align with cell boundaries. Any shock that passes through the interior of cells will again produce oscillatory components due to the high-order polynomials. In the finite element and DG communities, there exist a number of different approaches to deal with shocks, for example the introduction of artificial diffusion on troubled cells (using a troubled-cell indicator based e.g. on a modal decomposition of the solution), a switch to dissipative low-order finite volume methods on a subgrid, or the addition of some limiting procedures. 
Given the ample possibilities in this context, combined with the considerable implementation effort, we here refrain from the regime of the Euler equations with pronounced shocks, and rather concentrate on the regime of subsonic flows with wave-like phenomena. For a method that works well with shocks (but is more expensive per unknown), we refer to the step-69 tutorial program.

      -

      For the derivation of the DG formulation, we multiply the Euler equations with test functions $\mathbf{v}$ and integrate over an individual cell $K$, which gives

      -\[
+<p>For the derivation of the DG formulation, we multiply the Euler equations with test functions <picture><source srcset=$\mathbf{v}$ and integrate over an individual cell $K$, which gives

      +\[
 \left(\mathbf{v}, \frac{\partial \mathbf{w}}{\partial t}\right)_{K}
 + \left(\mathbf{v}, \nabla \cdot \mathbf{F}(\mathbf{w})\right)_{K} =
 \left(\mathbf{v},\mathbf{G}(\mathbf w)\right)_{K}.
-\] +\]" src="form_6365.png"/>

      We then integrate the second term by parts, moving the divergence from the solution slot to the test function slot, and producing an integral over the element boundary:

      -\[
+<picture><source srcset=\[
 \left(\mathbf{v}, \frac{\partial \mathbf{w}}{\partial t}\right)_{K}
 - \left(\nabla \mathbf{v}, \mathbf{F}(\mathbf{w})\right)_{K}
 + \left<\mathbf{v}, \mathbf{n} \cdot \widehat{\mathbf{F}}(\mathbf{w})
 \right>_{\partial K} =
 \left(\mathbf{v},\mathbf{G}(\mathbf w)\right)_{K}.
-\] +\]" src="form_6366.png"/>

      -

      In the surface integral, we have replaced the term $\mathbf{F}(\mathbf w)$ by the term $\widehat{\mathbf{F}}(\mathbf w)$, the numerical flux. The role of the numerical flux is to connect the solution on neighboring elements and weakly impose continuity of the solution. This ensures that the global coupling of the PDE is reflected in the discretization, despite independent basis functions on the cells. The connectivity to the neighbor is included by defining the numerical flux as a function $\widehat{\mathbf{F}}(\mathbf w^-,
-\mathbf w^+)$ of the solution from both sides of an interior face, $\mathbf
-w^-$ and $\mathbf w^+$. A basic property we require is that the numerical flux needs to be conservative. That is, we want all information (i.e., mass, momentum, and energy) that leaves a cell over a face to enter the neighboring cell in its entirety and vice versa. This can be expressed as $\widehat{\mathbf{F}}(\mathbf w^-, \mathbf w^+) =
-\widehat{\mathbf{F}}(\mathbf w^+, \mathbf w^-)$, meaning that the numerical flux evaluates to the same result from either side. Combined with the fact that the numerical flux is multiplied by the unit outer normal vector on the face under consideration, which points in opposite direction from the two sides, we see that the conservation is fulfilled. An alternative point of view of the numerical flux is as a single-valued intermediate state that links the solution weakly from both sides.

      -

      There is a large number of numerical flux functions available, also called Riemann solvers. For the Euler equations, there exist so-called exact Riemann solvers – meaning that the states from both sides are combined in a way that is consistent with the Euler equations along a discontinuity – and approximate Riemann solvers, which violate some physical properties and rely on other mechanisms to render the scheme accurate overall. Approximate Riemann solvers have the advantage of being cheaper to compute. Most flux functions have their origin in the finite volume community, which are similar to DG methods with polynomial degree 0 within the cells (called volumes). As the volume integral of the Euler operator $\mathbf{F}$ would disappear for constant solution and test functions, the numerical flux must fully represent the physical operator, explaining why there has been a large body of research in that community. For DG methods, consistency is guaranteed by higher order polynomials within the cells, making the numerical flux less of an issue and usually affecting only the convergence rate, e.g., whether the solution converges as $\mathcal O(h^p)$, $\mathcal O(h^{p+1/2})$ or $\mathcal
-O(h^{p+1})$ in the $L_2$ norm for polynomials of degree $p$. The numerical flux can thus be seen as a mechanism to select more advantageous dissipation/dispersion properties or regarding the extremal eigenvalue of the discretized and linearized operator, which affect the maximal admissible time step size in explicit time integrators.

      +

      In the surface integral, we have replaced the term $\mathbf{F}(\mathbf w)$ by the term $\widehat{\mathbf{F}}(\mathbf w)$, the numerical flux. The role of the numerical flux is to connect the solution on neighboring elements and weakly impose continuity of the solution. This ensures that the global coupling of the PDE is reflected in the discretization, despite independent basis functions on the cells. The connectivity to the neighbor is included by defining the numerical flux as a function $\widehat{\mathbf{F}}(\mathbf w^-,
+\mathbf w^+)$ of the solution from both sides of an interior face, $\mathbf
+w^-$ and $\mathbf w^+$. A basic property we require is that the numerical flux needs to be conservative. That is, we want all information (i.e., mass, momentum, and energy) that leaves a cell over a face to enter the neighboring cell in its entirety and vice versa. This can be expressed as $\widehat{\mathbf{F}}(\mathbf w^-, \mathbf w^+) =
+\widehat{\mathbf{F}}(\mathbf w^+, \mathbf w^-)$, meaning that the numerical flux evaluates to the same result from either side. Combined with the fact that the numerical flux is multiplied by the unit outer normal vector on the face under consideration, which points in opposite direction from the two sides, we see that the conservation is fulfilled. An alternative point of view of the numerical flux is as a single-valued intermediate state that links the solution weakly from both sides.

      +

      There is a large number of numerical flux functions available, also called Riemann solvers. For the Euler equations, there exist so-called exact Riemann solvers – meaning that the states from both sides are combined in a way that is consistent with the Euler equations along a discontinuity – and approximate Riemann solvers, which violate some physical properties and rely on other mechanisms to render the scheme accurate overall. Approximate Riemann solvers have the advantage of being cheaper to compute. Most flux functions have their origin in the finite volume community, which are similar to DG methods with polynomial degree 0 within the cells (called volumes). As the volume integral of the Euler operator $\mathbf{F}$ would disappear for constant solution and test functions, the numerical flux must fully represent the physical operator, explaining why there has been a large body of research in that community. For DG methods, consistency is guaranteed by higher order polynomials within the cells, making the numerical flux less of an issue and usually affecting only the convergence rate, e.g., whether the solution converges as $\mathcal O(h^p)$, $\mathcal O(h^{p+1/2})$ or $\mathcal
+O(h^{p+1})$ in the $L_2$ norm for polynomials of degree $p$. The numerical flux can thus be seen as a mechanism to select more advantageous dissipation/dispersion properties or regarding the extremal eigenvalue of the discretized and linearized operator, which affect the maximal admissible time step size in explicit time integrators.

      In this tutorial program, we implement two variants of fluxes that can be controlled via a switch in the program (of course, it would be easy to make them a run time parameter controlled via an input file). The first flux is the local Lax–Friedrichs flux

      -\[
+<picture><source srcset=\[
 \hat{\mathbf{F}}(\mathbf{w}^-,\mathbf{w}^+) =
 \frac{\mathbf{F}(\mathbf{w}^-)+\mathbf{F}(\mathbf{w}^+)}{2} +
    \frac{\lambda}{2}\left[\mathbf{w}^--\mathbf{w}^+\right]\otimes
    \mathbf{n^-}.
-\] +\]" src="form_6375.png"/>

      -

      In the original definition of the Lax–Friedrichs flux, a factor $\lambda =
-\max\left(\|\mathbf{u}^-\|+c^-, \|\mathbf{u}^+\|+c^+\right)$ is used (corresponding to the maximal speed at which information is moving on the two sides of the interface), stating that the difference between the two states, $[\![\mathbf{w}]\!]$ is penalized by the largest eigenvalue in the Euler flux, which is $\|\mathbf{u}\|+c$, where $c=\sqrt{\gamma p / \rho}$ is the speed of sound. In the implementation below, we modify the penalty term somewhat, given that the penalty is of approximate nature anyway. We use

      -\begin{align*}
+<p>In the original definition of the Lax–Friedrichs flux, a factor <picture><source srcset=$\lambda =
+\max\left(\|\mathbf{u}^-\|+c^-, \|\mathbf{u}^+\|+c^+\right)$ is used (corresponding to the maximal speed at which information is moving on the two sides of the interface), stating that the difference between the two states, $[\![\mathbf{w}]\!]$ is penalized by the largest eigenvalue in the Euler flux, which is $\|\mathbf{u}\|+c$, where $c=\sqrt{\gamma p / \rho}$ is the speed of sound. In the implementation below, we modify the penalty term somewhat, given that the penalty is of approximate nature anyway. We use

      +\begin{align*}
 \lambda
 &=
 \frac{1}{2}\max\left(\sqrt{\|\mathbf{u^-}\|^2+(c^-)^2},
@@ -226,52 +226,52 @@
 &=
 \frac{1}{2}\sqrt{\max\left(\|\mathbf{u^-}\|^2+(c^-)^2,
                            \|\mathbf{u}^+\|^2+(c^+)^2\right)}.
-\end{align*} +\end{align*}" src="form_6380.png"/>

      -

      The additional factor $\frac 12$ reduces the penalty strength (which results in a reduced negative real part of the eigenvalues, and thus increases the admissible time step size). Using the squares within the sums allows us to reduce the number of expensive square root operations, which is 4 for the original Lax–Friedrichs definition, to a single one. This simplification leads to at most a factor of 2 in the reduction of the parameter $\lambda$, since $\|\mathbf{u}\|^2+c^2 \leq
+<p> The additional factor <picture><source srcset=$\frac 12$ reduces the penalty strength (which results in a reduced negative real part of the eigenvalues, and thus increases the admissible time step size). Using the squares within the sums allows us to reduce the number of expensive square root operations, which is 4 for the original Lax–Friedrichs definition, to a single one. This simplification leads to at most a factor of 2 in the reduction of the parameter $\lambda$, since $\|\mathbf{u}\|^2+c^2 \leq
 \|\mathbf{u}\|^2+2 c |\mathbf{u}\| + c^2 = \left(\|\mathbf{u}\|+c\right)^2
-\leq 2 \left(\|\mathbf{u}\|^2+c^2\right)$, with the last inequality following from Young's inequality.

      -

      The second numerical flux is one proposed by Harten, Lax and van Leer, called the HLL flux. It takes the different directions of propagation of the Euler equations into account, depending on the speed of sound. It utilizes some intermediate states $\bar{\mathbf{u}}$ and $\bar{c}$ to define the two branches $s^\mathrm{p} = \max\left(0, \bar{\mathbf{u}}\cdot \mathbf{n} +
-\bar{c}\right)$ and $s^\mathrm{n} = \min\left(0, \bar{\mathbf{u}}\cdot
-\mathbf{n} - \bar{c}\right)$. From these branches, one then defines the flux

      -\[
+\leq 2 \left(\|\mathbf{u}\|^2+c^2\right)$, with the last inequality following from Young's inequality.

      +

      The second numerical flux is one proposed by Harten, Lax and van Leer, called the HLL flux. It takes the different directions of propagation of the Euler equations into account, depending on the speed of sound. It utilizes some intermediate states $\bar{\mathbf{u}}$ and $\bar{c}$ to define the two branches $s^\mathrm{p} = \max\left(0, \bar{\mathbf{u}}\cdot \mathbf{n} +
+\bar{c}\right)$ and $s^\mathrm{n} = \min\left(0, \bar{\mathbf{u}}\cdot
+\mathbf{n} - \bar{c}\right)$. From these branches, one then defines the flux

      +\[
 \hat{\mathbf{F}}(\mathbf{w}^-,\mathbf{w}^+) =
 \frac{s^\mathrm{p} \mathbf{F}(\mathbf{w}^-)-s^\mathrm{n} \mathbf{F}(\mathbf{w}^+)}
                    {s^\mathrm p - s^\mathrm{n} } +
 \frac{s^\mathrm{p} s^\mathrm{n}}{s^\mathrm{p}-s^\mathrm{n}}
 \left[\mathbf{w}^--\mathbf{w}^+\right]\otimes \mathbf{n^-}.
-\] +\]" src="form_6386.png"/>

      -

      Regarding the definition of the intermediate state $\bar{\mathbf{u}}$ and $\bar{c}$, several variants have been proposed. The variant originally proposed uses a density-averaged definition of the velocity, $\bar{\mathbf{u}}
+<p> Regarding the definition of the intermediate state <picture><source srcset=$\bar{\mathbf{u}}$ and $\bar{c}$, several variants have been proposed. The variant originally proposed uses a density-averaged definition of the velocity, $\bar{\mathbf{u}}
 = \frac{\sqrt{\rho^-} \mathbf{u}^- + \sqrt{\rho^+}\mathbf{u}^+}{\sqrt{\rho^-}
-+ \sqrt{\rho^+}}$. Since we consider the Euler equations without shocks, we simply use arithmetic means, $\bar{\mathbf{u}} = \frac{\mathbf{u}^- +
-\mathbf{u}^+}{2}$ and $\bar{c} = \frac{c^- + c^+}{2}$, with $c^{\pm} =
-\sqrt{\gamma p^{\pm} / \rho^{\pm}}$, in this tutorial program, and leave other variants to a possible extension. We also note that the HLL flux has been extended in the literature to the so-called HLLC flux, where C stands for the ability to represent contact discontinuities.

      -

      At the boundaries with no neighboring state $\mathbf{w}^+$ available, it is common practice to deduce suitable exterior values from the boundary conditions (see the general literature on DG methods for details). In this tutorial program, we consider three types of boundary conditions, namely inflow boundary conditions where all components are prescribed,

      -\[
++ \sqrt{\rho^+}}$. Since we consider the Euler equations without shocks, we simply use arithmetic means, $\bar{\mathbf{u}} = \frac{\mathbf{u}^- +
+\mathbf{u}^+}{2}$ and $\bar{c} = \frac{c^- + c^+}{2}$, with $c^{\pm} =
+\sqrt{\gamma p^{\pm} / \rho^{\pm}}$, in this tutorial program, and leave other variants to a possible extension. We also note that the HLL flux has been extended in the literature to the so-called HLLC flux, where C stands for the ability to represent contact discontinuities.

      +

      At the boundaries with no neighboring state $\mathbf{w}^+$ available, it is common practice to deduce suitable exterior values from the boundary conditions (see the general literature on DG methods for details). In this tutorial program, we consider three types of boundary conditions, namely inflow boundary conditions where all components are prescribed,

      +\[
 \mathbf{w}^+ = \begin{pmatrix} \rho_\mathrm{D}(t)\\
 (\rho \mathbf u)_{\mathrm D}(t) \\ E_\mathrm{D}(t)\end{pmatrix} \quad
  \text{(Dirichlet)},
-\] +\]" src="form_6392.png"/>

      subsonic outflow boundaries, where we do not prescribe exterior solutions as the flow field is leaving the domain and use the interior values instead; we still need to prescribe the energy as there is one incoming characteristic left in the Euler flux,

      -\[
+<picture><source srcset=\[
 \mathbf{w}^+ = \begin{pmatrix} \rho^-\\
 (\rho \mathbf u)^- \\ E_\mathrm{D}(t)\end{pmatrix} \quad
  \text{(mixed Neumann/Dirichlet)},
-\] +\]" src="form_6393.png"/>

      and wall boundary condition which describe a no-penetration configuration:

      -\[
+<picture><source srcset=\[
 \mathbf{w}^+ = \begin{pmatrix} \rho^-\\
 (\rho \mathbf u)^- - 2 [(\rho \mathbf u)^-\cdot \mathbf n] \mathbf{n}
  \\ E^-\end{pmatrix}.
-\] +\]" src="form_6394.png"/>

      -

      The polynomial expansion of the solution is finally inserted to the weak form and test functions are replaced by the basis functions. This gives a discrete in space, continuous in time nonlinear system with a finite number of unknown coefficient values $w_j$, $j=1,\ldots,n_\text{dofs}$. Regarding the choice of the polynomial degree in the DG method, there is no consensus in literature as of 2019 as to what polynomial degrees are most efficient and the decision is problem-dependent. Higher order polynomials ensure better convergence rates and are thus superior for moderate to high accuracy requirements for smooth solutions. At the same time, the volume-to-surface ratio of where degrees of freedom are located, increases with higher degrees, and this makes the effect of the numerical flux weaker, typically reducing dissipation. However, in most of the cases the solution is not smooth, at least not compared to the resolution that can be afforded. This is true for example in incompressible fluid dynamics, compressible fluid dynamics, and the related topic of wave propagation. In this pre-asymptotic regime, the error is approximately proportional to the numerical resolution, and other factors such as dispersion errors or the dissipative behavior become more important. Very high order methods are often ruled out because they come with more restrictive CFL conditions measured against the number of unknowns, and they are also not as flexible when it comes to representing complex geometries. Therefore, polynomial degrees between two and six are most popular in practice, see e.g. the efficiency evaluation in [FehnWallKronbichler2019] and references cited therein.

      +

      The polynomial expansion of the solution is finally inserted to the weak form and test functions are replaced by the basis functions. This gives a discrete in space, continuous in time nonlinear system with a finite number of unknown coefficient values $w_j$, $j=1,\ldots,n_\text{dofs}$. Regarding the choice of the polynomial degree in the DG method, there is no consensus in literature as of 2019 as to what polynomial degrees are most efficient and the decision is problem-dependent. Higher order polynomials ensure better convergence rates and are thus superior for moderate to high accuracy requirements for smooth solutions. At the same time, the volume-to-surface ratio of where degrees of freedom are located, increases with higher degrees, and this makes the effect of the numerical flux weaker, typically reducing dissipation. However, in most of the cases the solution is not smooth, at least not compared to the resolution that can be afforded. This is true for example in incompressible fluid dynamics, compressible fluid dynamics, and the related topic of wave propagation. In this pre-asymptotic regime, the error is approximately proportional to the numerical resolution, and other factors such as dispersion errors or the dissipative behavior become more important. Very high order methods are often ruled out because they come with more restrictive CFL conditions measured against the number of unknowns, and they are also not as flexible when it comes to representing complex geometries. Therefore, polynomial degrees between two and six are most popular in practice, see e.g. the efficiency evaluation in [FehnWallKronbichler2019] and references cited therein.

      Explicit time integration

      To discretize in time, we slightly rearrange the weak form and sum over all cells:

      -\[
+<picture><source srcset=\[
 \sum_{K \in \mathcal T_h} \left(\boldsymbol{\varphi}_i,
 \frac{\partial \mathbf{w}}{\partial t}\right)_{K}
 =
@@ -282,13 +282,13 @@
 \mathbf{n} \cdot \widehat{\mathbf{F}}(\mathbf{w})\right>_{\partial K} +
 \left(\boldsymbol{\varphi}_i,\mathbf{G}(\mathbf w)\right)_{K}
 \right],
-\] +\]" src="form_6396.png"/>

      -

      where $\boldsymbol{\varphi}_i$ runs through all basis functions with from 1 to $n_\text{dofs}$.

      -

      We now denote by $\mathcal M$ the mass matrix with entries $\mathcal M_{ij} =
+<p> where <picture><source srcset=$\boldsymbol{\varphi}_i$ runs through all basis functions with from 1 to $n_\text{dofs}$.

      +

      We now denote by $\mathcal M$ the mass matrix with entries $\mathcal M_{ij} =
/usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html	2024-03-17 21:57:48.707266810 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html	2024-03-17 21:57:48.711266835 +0000
@@ -141,17 +141,17 @@
 <p><a class=

      Introduction

      Simulation of the motion of massless tracer particles in a vortical flow

      Particles play an important part in numerical models for a large number of applications. Particles are routinely used as massless tracers to visualize the dynamic of a transient flow. They can also play an intrinsic role as part of a more complex finite element model, as is the case for the Particle-In-Cell (PIC) method [GLHPW2018] or they can even be used to simulate the motion of granular matter, as in the Discrete Element Method (DEM) [Blais2019]. In the case of DEM, the resulting model is not related to the finite element method anymore, but just leads to a system of ordinary differential equation which describes the motion of the particles and the dynamic of their collisions. All of these models can be built using deal.II's particle handling capabilities.

      -

      In the present step, we use particles as massless tracers to illustrate the dynamic of a vortical flow. Since the particles are massless tracers, the position of each particle $i$ is described by the following ordinary differential equation (ODE):

      -\[
+<p>In the present step, we use particles as massless tracers to illustrate the dynamic of a vortical flow. Since the particles are massless tracers, the position of each particle <picture><source srcset=$i$ is described by the following ordinary differential equation (ODE):

      +\[
 \frac{d \textbf{x}_i}{dt} =\textbf{u}(\textbf{x}_i)
-\] +\]" src="form_6512.png"/>

      -

      where $\textbf{x}_i$ is the position of particle $i$ and $\textbf{u}(\textbf{x}_i)$ the flow velocity at its position. In the present step, this ODE is solved using the explicit Euler method. The resulting scheme is:

      -\[
+<p>where <picture><source srcset=$\textbf{x}_i$ is the position of particle $i$ and $\textbf{u}(\textbf{x}_i)$ the flow velocity at its position. In the present step, this ODE is solved using the explicit Euler method. The resulting scheme is:

      +\[
 \textbf{x}_{i}^{n+1} = \textbf{x}_{i}^{n} + \Delta t \; \textbf{u}(\textbf{x}_{i}^{n})
-\] +\]" src="form_6515.png"/>

      -

      where $\textbf{x}_{i}^{n+1}$ and $\textbf{x}_{i}^{n}$ are the position of particle $i$ at time $t+\Delta t$ and $t$, respectively and where $\Delta t$ is the time step. In the present step, the velocity at the location of particles is obtained in two different fashions:

    1. The plain program
    131072 132096 12 16 19 11 12 21
    - This program was contributed by Matthias Maier (Texas A&M University), and Ignacio Tomas (Sandia National Laboratories $^{\!\dagger}$).

    -

    $^\dagger$Sandia National Laboratories is a multimission laboratory managed and operated by National Technology & Engineering Solutions of Sandia, LLC, a wholly owned subsidiary of Honeywell International Inc., for the U.S. Department of Energy's National Nuclear Security Administration under contract DE-NA0003525. This document describes objective technical results and analysis. Any subjective views or opinions that might be expressed in the paper do not necessarily represent the views of the U.S. Department of Energy or the United States Government.

    + This program was contributed by Matthias Maier (Texas A&M University), and Ignacio Tomas (Sandia National Laboratories $^{\!\dagger}$).

    +

    $^\dagger$Sandia National Laboratories is a multimission laboratory managed and operated by National Technology & Engineering Solutions of Sandia, LLC, a wholly owned subsidiary of Honeywell International Inc., for the U.S. Department of Energy's National Nuclear Security Administration under contract DE-NA0003525. This document describes objective technical results and analysis. Any subjective views or opinions that might be expressed in the paper do not necessarily represent the views of the U.S. Department of Energy or the United States Government.

    Note
    This tutorial step implements a first-order accurate guaranteed maximum wavespeed method based on a first-order graph viscosity for solving Euler's equations of gas dynamics [GuermondPopov2016]. As such it is presented primarily for educational purposes. For actual research computations you might want to consider exploring a corresponding high-performance implementation of a second-order accurate scheme that uses convex limiting techniques, and strong stability-preserving (SSP) time integration, see [GuermondEtAl2018] (website).
    If you use this program as a basis for your own work, please consider citing it in your list of references. The initial version of this work was contributed to the deal.II project by the authors listed in the following citation: 10.5281/zenodo.3698223
    @@ -152,15 +152,15 @@

    It should be noted that first-order schemes in the context of hyperbolic conservation laws require prohibitively many degrees of freedom to resolve certain key features of the simulated fluid, and thus, typically only serve as elementary building blocks in higher-order schemes [GuermondEtAl2018]. However, we hope that the reader still finds the tutorial step to be a good starting point (in particular with respect to the programming techniques) before jumping into full research codes such as the second-order scheme discussed in [GuermondEtAl2018].

    Euler's equations of gas dynamics

    The compressible Euler's equations of gas dynamics are written in conservative form as follows:

    -\begin{align}
+<picture><source srcset=\begin{align}
 \mathbf{u}_t + \text{div} \, \mathbb{f}(\mathbf{u}) = \boldsymbol{0} ,
-\end{align} +\end{align}" src="form_6526.png"/>

    -

    where $\mathbf{u}(\textbf{x},t):\mathbb{R}^{d} \times \mathbb{R}
-\rightarrow \mathbb{R}^{d+2}$, and $\mathbb{f}(\mathbf{u}):\mathbb{R}^{d+2}
-\rightarrow \mathbb{R}^{(d+2) \times d}$, and $d \geq 1$ is the space dimension. We say that $\mathbf{u} \in \mathbb{R}^{d+2}$ is the state and $\mathbb{f}(\mathbf{u}) \in  \mathbb{R}^{(d+2) \times d}$ is the flux of the system. In the case of Euler's equations the state is given by $\textbf{u} = [\rho, \textbf{m}^\top,E]^{\top}$: where $\rho \in \mathbb{R}^+$ denotes the density, $\textbf{m} \in \mathbb{R}^d$ is the momentum, and $E
-\in \mathbb{R}^+$ is the total energy of the system. The flux of the system $\mathbb{f}(\mathbf{u})$ is defined as

    -\begin{align*}
+<p> where <picture><source srcset=$\mathbf{u}(\textbf{x},t):\mathbb{R}^{d} \times \mathbb{R}
+\rightarrow \mathbb{R}^{d+2}$, and $\mathbb{f}(\mathbf{u}):\mathbb{R}^{d+2}
+\rightarrow \mathbb{R}^{(d+2) \times d}$, and $d \geq 1$ is the space dimension. We say that $\mathbf{u} \in \mathbb{R}^{d+2}$ is the state and $\mathbb{f}(\mathbf{u}) \in  \mathbb{R}^{(d+2) \times d}$ is the flux of the system. In the case of Euler's equations the state is given by $\textbf{u} = [\rho, \textbf{m}^\top,E]^{\top}$: where $\rho \in \mathbb{R}^+$ denotes the density, $\textbf{m} \in \mathbb{R}^d$ is the momentum, and $E
+\in \mathbb{R}^+$ is the total energy of the system. The flux of the system $\mathbb{f}(\mathbf{u})$ is defined as

    +\begin{align*}
 \mathbb{f}(\textbf{u})
 =
 \begin{bmatrix}
@@ -168,32 +168,32 @@
   \rho^{-1} \textbf{m} \otimes \textbf{m} + \mathbb{I} p\\
   \tfrac{\textbf{m}^\top}{\rho} (E + p)
 \end{bmatrix},
-\end{align*} +\end{align*}" src="form_6537.png"/>

    -

    where $\mathbb{I} \in \mathbb{R}^{d \times d}$ is the identity matrix and $\otimes$ denotes the tensor product. Here, we have introduced the pressure $p$ that, in general, is defined by a closed-form equation of state. In this tutorial we limit the discussion to the class of polytropic ideal gases for which the pressure is given by

    -\begin{align*}
+<p> where <picture><source srcset=$\mathbb{I} \in \mathbb{R}^{d \times d}$ is the identity matrix and $\otimes$ denotes the tensor product. Here, we have introduced the pressure $p$ that, in general, is defined by a closed-form equation of state. In this tutorial we limit the discussion to the class of polytropic ideal gases for which the pressure is given by

    +\begin{align*}
 p = p(\textbf{u}) := (\gamma -1) \Big(E -
 \tfrac{|\textbf{m}|^2}{2\,\rho}
 \Big),
-\end{align*} +\end{align*}" src="form_6539.png"/>

    -

    where the factor $\gamma \in (1,5/3]$ denotes the ratio of specific heats.

    +

    where the factor $\gamma \in (1,5/3]$ denotes the ratio of specific heats.

    Solution theory

    Hyperbolic conservation laws, such as

    -\begin{align*}
+<picture><source srcset=\begin{align*}
 \mathbf{u}_t + \text{div} \, \mathbb{f}(\mathbf{u}) = \boldsymbol{0},
-\end{align*} +\end{align*}" src="form_6541.png"/>

    -

    pose a significant challenge with respect to solution theory. An evident observation is that rewriting the equation in variational form and testing with the solution itself does not lead to an energy estimate because the pairing $\langle \text{div} \, \mathbb{f}(\mathbf{u}), \mathbf{u}\rangle$ (understood as the $L^2(\Omega)$ inner product or duality pairing) is not guaranteed to be non-negative. Notions such as energy-stability or $L^2(\Omega)$-stability are (in general) meaningless in this context.

    -

    Historically, the most fruitful step taken in order to deepen the understanding of hyperbolic conservation laws was to assume that the solution is formally defined as $\mathbf{u} := \lim_{\epsilon \rightarrow
-0^+} \mathbf{u}^{\epsilon}$ where $\mathbf{u}^{\epsilon}$ is the solution of the parabolic regularization

    -\begin{align}
+<p> pose a significant challenge with respect to solution theory. An evident observation is that rewriting the equation in variational form and testing with the solution itself does not lead to an energy estimate because the pairing <picture><source srcset=$\langle \text{div} \, \mathbb{f}(\mathbf{u}), \mathbf{u}\rangle$ (understood as the $L^2(\Omega)$ inner product or duality pairing) is not guaranteed to be non-negative. Notions such as energy-stability or $L^2(\Omega)$-stability are (in general) meaningless in this context.

    +

    Historically, the most fruitful step taken in order to deepen the understanding of hyperbolic conservation laws was to assume that the solution is formally defined as $\mathbf{u} := \lim_{\epsilon \rightarrow
+0^+} \mathbf{u}^{\epsilon}$ where $\mathbf{u}^{\epsilon}$ is the solution of the parabolic regularization

    +\begin{align}
 \mathbf{u}_t^{\epsilon} + \text{div} \, \mathbb{f}(\mathbf{u}^{\epsilon})
 - {\epsilon} \Delta \mathbf{u}^{\epsilon} = 0.
-\end{align} +\end{align}" src="form_6546.png"/>

    -

    Such solutions, which are understood as the solution recovered in the zero-viscosity limit, are often referred to as viscosity solutions. (This is, because physically $\epsilon$ can be understood as related to the viscosity of the fluid, i.e., a quantity that indicates the amount of friction neighboring gas particles moving at different speeds exert on each other. The Euler equations themselves are derived under the assumption of no friction, but can physically be expected to describe the limiting case of vanishing friction or viscosity.) Global existence and uniqueness of such solutions is an open issue. However, we know at least that if such viscosity solutions exists they have to satisfy the constraint $\textbf{u}(\mathbf{x},t) \in \mathcal{B}$ for all $\mathbf{x} \in \Omega$ and $t \geq 0$ where

    -\begin{align}
+<p> Such solutions, which are understood as the solution recovered in the zero-viscosity limit, are often referred to as <em>viscosity solutions</em>. (This is, because physically <picture><source srcset=$\epsilon$ can be understood as related to the viscosity of the fluid, i.e., a quantity that indicates the amount of friction neighboring gas particles moving at different speeds exert on each other. The Euler equations themselves are derived under the assumption of no friction, but can physically be expected to describe the limiting case of vanishing friction or viscosity.) Global existence and uniqueness of such solutions is an open issue. However, we know at least that if such viscosity solutions exists they have to satisfy the constraint $\textbf{u}(\mathbf{x},t) \in \mathcal{B}$ for all $\mathbf{x} \in \Omega$ and $t \geq 0$ where

    +\begin{align}
   \mathcal{B} = \big\{ \textbf{u} =
   [\rho, \textbf{m}^\top,E]^{\top} \in \mathbb{R}^{d+2} \, \big |
   \
@@ -203,81 +203,81 @@
   \
   s(\mathbf{u}) \geq \min_{x \in \Omega} s(\mathbf{u}_0(\mathbf{x}))
   \big\}.
-\end{align} +\end{align}" src="form_6550.png"/>

    -

    Here, $s(\mathbf{u})$ denotes the specific entropy

    -\begin{align}
+<p> Here, <picture><source srcset=$s(\mathbf{u})$ denotes the specific entropy

    +\begin{align}
   s(\mathbf{u}) = \ln \Big(\frac{p(\mathbf{u})}{\rho^{\gamma}}\Big).
-\end{align} +\end{align}" src="form_6552.png"/>

    -

    We will refer to $\mathcal{B}$ as the invariant set of Euler's equations. In other words, a state $\mathbf{u}(\mathbf{x},t)\in\mathcal{B}$ obeys positivity of the density, positivity of the internal energy, and a local minimum principle on the specific entropy. This condition is a simplified version of a class of pointwise stability constraints satisfied by the exact (viscosity) solution. By pointwise we mean that the constraint has to be satisfied at every point of the domain, not just in an averaged (integral, or high order moments) sense.

    -

    In context of a numerical approximation, a violation of such a constraint has dire consequences: it almost surely leads to catastrophic failure of the numerical scheme, loss of hyperbolicity, and overall, loss of well-posedness of the (discrete) problem. It would also mean that we have computed something that can not be interpreted physically. (For example, what are we to make of a computed solution with a negative density?) In the following we will formulate a scheme that ensures that the discrete approximation of $\mathbf{u}(\mathbf{x},t)$ remains in $\mathcal{B}$.

    +

    We will refer to $\mathcal{B}$ as the invariant set of Euler's equations. In other words, a state $\mathbf{u}(\mathbf{x},t)\in\mathcal{B}$ obeys positivity of the density, positivity of the internal energy, and a local minimum principle on the specific entropy. This condition is a simplified version of a class of pointwise stability constraints satisfied by the exact (viscosity) solution. By pointwise we mean that the constraint has to be satisfied at every point of the domain, not just in an averaged (integral, or high order moments) sense.

    +

    In context of a numerical approximation, a violation of such a constraint has dire consequences: it almost surely leads to catastrophic failure of the numerical scheme, loss of hyperbolicity, and overall, loss of well-posedness of the (discrete) problem. It would also mean that we have computed something that can not be interpreted physically. (For example, what are we to make of a computed solution with a negative density?) In the following we will formulate a scheme that ensures that the discrete approximation of $\mathbf{u}(\mathbf{x},t)$ remains in $\mathcal{B}$.

    Variational versus collocation-type discretizations

    Following step-9, step-12, step-33, and step-67, at this point it might look tempting to base a discretization of Euler's equations on a (semi-discrete) variational formulation:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   (\partial_t\mathbf{u}_{h},\textbf{v}_h)_{L^2(\Omega)}
   - ( \mathbb{f}(\mathbf{u}_{h}) ,\text{grad} \, \textbf{v}_{h})_{L^2(\Omega)}
   + s_h(\mathbf{u}_{h},\textbf{v}_h)_{L^2(\Omega)} = \boldsymbol{0}
   \quad\forall \textbf{v}_h \in \mathbb{V}_h.
-\end{align*} +\end{align*}" src="form_6555.png"/>

    -

    Here, $\mathbb{V}_h$ is an appropriate finite element space, and $s_h(\cdot,\cdot)_{L^2(\Omega)}$ is some linear stabilization method (possibly complemented with some ad-hoc shock-capturing technique, see for instance Chapter 5 of [GuermondErn2004] and references therein). Most time-dependent discretization approaches described in the deal.II tutorials are based on such a (semi-discrete) variational approach. Fundamentally, from an analysis perspective, variational discretizations are conceived to provide some notion of global (integral) stability, meaning an estimate of the form

    -\begin{align*}
+<p> Here, <picture><source srcset=$\mathbb{V}_h$ is an appropriate finite element space, and $s_h(\cdot,\cdot)_{L^2(\Omega)}$ is some linear stabilization method (possibly complemented with some ad-hoc shock-capturing technique, see for instance Chapter 5 of [GuermondErn2004] and references therein). Most time-dependent discretization approaches described in the deal.II tutorials are based on such a (semi-discrete) variational approach. Fundamentally, from an analysis perspective, variational discretizations are conceived to provide some notion of global (integral) stability, meaning an estimate of the form

    +\begin{align*}
   |\!|\!| \mathbf{u}_{h}(t) |\!|\!| \leq |\!|\!| \mathbf{u}_{h}(0) |\!|\!|
-\end{align*} +\end{align*}" src="form_6558.png"/>

    -

    holds true, where $|\!|\!| \cdot |\!|\!| $ could represent the $L^2(\Omega)$-norm or, more generally, some discrete (possibly mesh dependent) energy-norm. Variational discretizations of hyperbolic conservation laws have been very popular since the mid eighties, in particular combined with SUPG-type stabilization and/or upwinding techniques (see the early work of [Brooks1982] and [Johnson1986]). They have proven to be some of the best approaches for simulations in the subsonic shockless regime and similarly benign situations.

    +

    holds true, where $|\!|\!| \cdot |\!|\!| $ could represent the $L^2(\Omega)$-norm or, more generally, some discrete (possibly mesh dependent) energy-norm. Variational discretizations of hyperbolic conservation laws have been very popular since the mid eighties, in particular combined with SUPG-type stabilization and/or upwinding techniques (see the early work of [Brooks1982] and [Johnson1986]). They have proven to be some of the best approaches for simulations in the subsonic shockless regime and similarly benign situations.

    However, in the transonic and supersonic regimes, and shock-hydrodynamics applications the use of variational schemes might be questionable. In fact, at the time of this writing, most shock-hydrodynamics codes are still firmly grounded on finite volume methods. The main reason for failure of variational schemes in such extreme regimes is the lack of pointwise stability. This stems from the fact that a priori bounds on integrated quantities (e.g. integrals of moments) have in general no implications on pointwise properties of the solution. While some of these problems might be alleviated by the (perpetual) chase of the right shock capturing scheme, finite difference-like and finite volume schemes still have an edge in many regards.

    In this tutorial step we therefore depart from variational schemes. We will present a completely algebraic formulation (with the flavor of a collocation-type scheme) that preserves constraints pointwise, i.e.,

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \textbf{u}_h(\mathbf{x}_i,t) \in \mathcal{B}
   \;\text{at every node}\;\mathbf{x}_i\;\text{of the mesh}.
-\end{align*} +\end{align*}" src="form_6560.png"/>

    Contrary to finite difference/volume schemes, the scheme implemented in this step maximizes the use of finite element software infrastructure, works on any mesh, in any space dimension, and is theoretically guaranteed to always work, all the time, no exception. This illustrates that deal.II can be used far beyond the context of variational schemes in Hilbert spaces and that a large number of classes, modules and namespaces from deal.II can be adapted for such a purpose.

    Description of the scheme

    Let $\mathbb{V}_h$ be a scalar-valued finite dimensional space spanned by a basis $\{\phi_i\}_{i \in \mathcal{V}}$ where: $\phi_i:\Omega \rightarrow \mathbb{R}$ and $\mathcal{V}$ is the set of all indices (nonnegative integers) identifying each scalar Degree of Freedom (DOF) in the mesh. Therefore a scalar finite element function $u_h \in \mathbb{V}_h$ can be written as $u_h = \sum_{i \in \mathcal{V}} U_i \phi_i$ with $U_i \in \mathbb{R}$. We introduce the notation for vector-valued approximation spaces $\pmb{\mathbb{V}}_h := \{\mathbb{V}_h\}^{d+2}$. Let $\mathbf{u}_h \in \pmb{\mathbb{V}}_h$; then it can be written as $\mathbf{u}_h = \sum_{i \in \mathcal{V}} \mathbf{U}_i \phi_i$ where $\mathbf{U}_i \in \mathbb{R}^{d+2}$ and $\phi_i$ is a scalar-valued shape function.

    Note
    We purposely refrain from using vector-valued finite element spaces in our notation. Vector-valued finite element spaces are natural for variational formulations of PDE systems (e.g. Navier-Stokes). In such context, the interactions that have to be computed describe interactions between DOFs: with proper renumbering of the vector-valued DoFHandler (i.e. initialized with an FESystem) it is possible to compute the block-matrices (required in order to advance the solution) with relative ease. However, the interactions that have to be computed in the context of time-explicit collocation-type schemes (such as finite differences and/or the scheme presented in this tutorial) can be better described as interactions between nodes (not between DOFs). In addition, in our case we do not solve a linear equation in order to advance the solution. This leaves very little reason to use vector-valued finite element spaces both in theory and/or practice.
    We will use the usual Lagrange finite elements: let $\{\mathbf{x}_i\}_{i \in \mathcal{V}}$ denote the set of all support points (see this glossary entry), where $\mathbf{x}_i \in \mathbb{R}^d$. Then each index $i \in \mathcal{V}$ uniquely identifies a support point $\mathbf{x}_i$, as well as a scalar-valued shape function $\phi_i$. With this notation at hand we can define the (explicit time stepping) scheme as:

\begin{align*}
   m_i \frac{\mathbf{U}_i^{n+1} - \mathbf{U}_i^{n}}{\tau}
   + \sum_{j \in \mathcal{I}(i)} \mathbb{f}(\mathbf{U}_j^{n})\cdot
   \mathbf{c}_{ij} - \sum_{j \in \mathcal{I}(i)}
   d_{ij} \mathbf{U}_j^{n} = \boldsymbol{0} \, ,
\end{align*}

    where

    The definition of $\lambda_{\text{max}} (\mathbf{U},\mathbf{V},\textbf{n})$ is far from trivial and we will postpone the precise definition in order to focus first on some algorithmic and implementation questions. We note that