@article{10023,
  author    = {Karatzas, Ioannis and Maas, Jan and Schachermayer, Walter},
  title     = {Trajectorial Dissipation and Gradient Flow for the Relative Entropy in {Markov} Chains},
  journal   = {Communications in Information and Systems},
  volume    = {21},
  number    = {4},
  pages     = {481--536},
  year      = {2021},
  publisher = {International Press},
  issn      = {1526-7555},
  doi       = {10.4310/CIS.2021.v21.n4.a1},
  keywords  = {Markov Chain, relative entropy, time reversal, steepest descent, gradient flow},
  abstract  = {We study the temporal dissipation of variance and relative entropy for ergodic Markov Chains in continuous time, and compute explicitly the corresponding dissipation rates. These are identified, as is well known, in the case of the variance in terms of an appropriate Hilbertian norm; and in the case of the relative entropy, in terms of a Dirichlet form which morphs into a version of the familiar Fisher information under conditions of detailed balance. Here we obtain trajectorial versions of these results, valid along almost every path of the random motion and most transparent in the backwards direction of time. Martingale arguments and time reversal play crucial roles, as in the recent work of Karatzas, Schachermayer and Tschiderer for conservative diffusions. Extensions are developed to general ``convex divergences'' and to countable state-spaces. The steepest descent and gradient flow properties for the variance, the relative entropy, and appropriate generalizations, are studied along with their respective geometries under conditions of detailed balance, leading to a very direct proof for the HWI inequality of Otto and Villani in the present context.},
}