<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet href="../images/Distributome_style.css" type="text/css"?>

<distributome version="1.1"
              xmlns="http://www.distributome.org"
              xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
              xsi:schemaLocation="http://www.distributome.org/distributome.xsd">


    <distributions>
		<distribution id="arcsine">
			<name>arcsine distribution</name>
			<name>Levy arcsine distribution</name>
			<type>continuous</type>
			<model>The arcsine distribution models the proportion of time that Brownian motion is positive.</model>
			<parameter>\( a \in (-\infty, \infty) \), location</parameter>
			<parameter>\( w \in (0, \infty) \), scale</parameter>
			<standard>\( a = 0 \), \( w = 1 \)</standard>
			<support>\(x \in [a, a + w] \)</support>
			<pdf>\(f(x) = \frac{1}{\pi\sqrt{(x - a) (a + w -x)}}\)</pdf>
			<mode>\(x \in \{a, a + w\} \)</mode>
			<cdf>\(F(x) = \frac{2}{\pi}\arcsin\left(\sqrt \frac{x - a}{w}\right)\)</cdf>
			<qf>\(Q(u) = a + w \sin^2\left(\frac{\pi}{2} u\right), \; u \in (0, 1)\)</qf>
			<mgf>\( M(t) = e^{a t} \sum_{n=0}^\infty \left(\prod_{j=0}^{n-1} \frac{2 j + 1}{2 j + 2}\right) \frac{w^n t^n}{n!}, \; t \in (-\infty, \infty) \)</mgf>
			<mean>\(a + \frac{1}{2} w\)</mean>
			<variance>\(\frac{1}{8} w^2\)</variance>
			<skew>\(0\)</skew>
			<kurt>\(-\frac{3}{2}\)</kurt>
			<median>\(a + \frac{1}{2} w\)</median>
			<q1>\(a + \frac{2 - \sqrt{2}}{4} w\)</q1>
			<q3>\(a + \frac{2 + \sqrt{2}}{4} w\)</q3>
			<history>Derived by Paul Levy in 1939 as the distribution of proportion of time that Brownian motion is positive.</history>
			<cite>arnold1980some</cite>
		</distribution>

        <distribution id="Bernoulli">
            <name>Bernoulli distribution</name>
            <type>discrete</type>
            <model>The Bernoulli distribution governs an indicator random variable.</model>
            <parameter>\(p \in [0, 1]\), the probability of the event</parameter>
            <standard>\( p = \frac{1}{2} \)</standard>
            <support>\(\{0, 1\}\)</support>
            <pdf>\(f(x) = p^x (1 - p)^{1 - x}, \; x \in \{0, 1\}\)</pdf>
            <mode>\(\lfloor 2 p \rfloor\)</mode>
            <cdf>\(F(x) = (1 - p)^{1 - x}, \; x \in \{0, 1\}\)</cdf>
            <qf>\(Q(u) = F^{-1}(u), \; u \in [0, 1]\) where \(F\) is the distribution function</qf>
            <pgf>\(G(t) = 1 - p + p t, \; t \in (-\infty, \infty)\)</pgf>
            <mgf>\(M(t) = 1 - p + p e^t, \; t \in (-\infty, \infty)\)</mgf>
            <cf>\(\varphi(t) = 1 - p + p e^{i t}, \; t \in (-\infty, \infty)\)</cf>
            <moments type="raw">\(\mu(n) = p, \; n \in \{0, 1, \ldots\}\)</moments>
            <mean>\(p\)</mean>
            <variance>\(p (1-p)\)</variance>
            <skew>\(\frac{1 - 2 p}{\sqrt{p (1 - p)}}\)</skew>
            <kurt>\(\frac{1- 6 p + 6 p^2}{p (1 - p)}\)</kurt>
            <entropy>\(-(1 - p) \ln(1 - p) - p \ln(p)\)</entropy>
            <median>\(Q\left(\frac{1}{2}\right)\) where \(Q\) is the quantile function</median>
            <q1>\(Q\left(\frac{1}{4}\right)\) where \(Q\) is the quantile function</q1>
            <q3>\(Q\left(\frac{3}{4}\right)\) where \(Q\) is the quantile function</q3>
            <family>power series </family>
            <family>exponential</family>
            <history>Named for Jacob Bernoulli</history>
            <cite>marshall1985family</cite>
        </distribution>

        <distribution id="beta">
            <name>beta distribution</name>
            <type>continuous</type>
            <model>The beta distribution is used to model random proportions and probabilities. </model>
            <parameter>\(\alpha \in (0, \infty)\), the left shape parameter</parameter>
            <parameter>\(\beta \in (0, \infty)\), the right shape parameter</parameter>
            <standard>\( \alpha = 1 \), \( \beta = 1 \)</standard>
            <support>\((0, 1)\)</support>
            <pdf>\(f(x) = \frac{1}{B(\alpha, \beta)} x^{\alpha-1}(1 - x)^{\beta-1}, \; x \in (0, 1)\), where \( B \) is the beta function</pdf>
            <mode>\(\frac{\alpha - 1}{\alpha + \beta - 2}; \; \alpha \in (1, \infty), \beta \in (1, \infty)\)</mode>
            <cdf>\(F(x) = \frac{B(x; \alpha, \beta)}{B(\alpha, \beta)}, \; x \in (0, 1)\), where \( x \mapsto B(x; \alpha, \beta) \) is the incomplete beta function</cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in (0, 1)\), where \(F\) is the distribution function.</qf>
            <mgf>\(1  +\sum_{k=1}^{\infty} \left( \prod_{r=0}^{k-1} \frac{\alpha+r}{\alpha+\beta+r} \right) \frac{t^k}{k!}\)</mgf>
            <mean>\(\frac{\alpha}{\alpha + \beta}\)</mean>
            <variance>\(\frac{\alpha \beta}{(\alpha + \beta)^2 (\alpha + \beta + 1)}\)</variance>
            <skew>\(\frac{2\,(\beta-\alpha)\sqrt{\alpha+\beta+1}}{(\alpha+\beta+2)\sqrt{\alpha\beta}}\)</skew>
            <kurt>\(\frac{6[(\alpha - \beta)^2 (\alpha +\beta + 1) - \alpha \beta (\alpha + \beta + 2)]}{\alpha \beta (\alpha + \beta + 2) (\alpha + \beta + 3)}\)</kurt>
            <entropy>\(\ln(B(\alpha, \beta)) - (\alpha - 1) \psi(\alpha) - (\beta - 1) \psi(\beta) + (\alpha + \beta - 2) \psi(\alpha + \beta)\) where \(\psi\) is the digamma function</entropy>
            <median>\(Q\left(\frac{1}{2}\right) \)</median>
            <q1>\(Q\left(\frac{1}{4}\right)\) where \(Q\) is the quantile function</q1>
            <q3>\(Q\left(\frac{3}{4}\right)\) where \(Q\) is the quantile function</q3>
            <family>exponential</family>
            <cite>mcdonald1995generalization</cite>
        </distribution>

        <distribution id="general beta">
            <name>beta general distribution</name>
            <name>beta generalized distribution</name>
            <name>generalized beta distribution</name>
            <type>continuous</type>
            <model>The generalized beta distribution is used to model random proportions and probabilities. 
            	It extends the (standard) beta distribution, supported on \([0, 1]\) to an arbitrary range
            	\([L, R]\).</model>
            <parameter>\(\alpha \in (0, \infty)\), the left shape parameter</parameter>
            <parameter>\(\beta \in (0, \infty)\), the right shape parameter</parameter>
            <parameter>\(L \in (-\infty, \infty)\), the left limit of the support range</parameter>
            <parameter>\(R \in (L, \infty)\), the right limit of the support range</parameter>
            <support>\((L, R)\)</support>
            <pdf><![CDATA[
	            \(\begin{cases}  
		            0 & \text{for } x < L \text{ or } x > R \\
		            \frac{\beta}{R - L} & \text{for } x = L \text{ and } \alpha = 1 \\
		            \infty & \text{for } x = L \text{ and } \alpha < 1 \\
		            0 & \text{for } x = L \text{ and } \alpha > 1 \\
		            \frac{\alpha}{R - L} & \text{for } x = R \text{ and } \beta = 1 \\
		            \infty & \text{for } x = R \text{ and } \beta < 1 \\
		            0 & \text{for } x = R \text{ and } \beta > 1 \\
		            \exp(\log\Gamma(\alpha + \beta) - (\log\Gamma(\alpha) + \log\Gamma(\beta)) - \log(R - L) + 
		            	(\alpha-1)\log\left(\frac{x-L}{R-L}\right)+(\beta-1)\log\left(\frac{R-x}{R-L}\right)) & \text{otherwise}
	            \end{cases}\) 
        		]]>
        	</pdf>
            <mode><![CDATA[
            	\(\begin{cases}  
		            L & \text{for } \alpha < 1 \\
		            R & \text{for } \beta \leq 1 \\
		            L + \frac{(R - L)(\alpha - 1)}{\alpha + \beta - 2} & \text{otherwise}
	            \end{cases}\)
        		]]>
            </mode>
            <cdf>\(I_{\frac{x-L}{R-L}}(\alpha,\beta)\!\)</cdf>
            <mean>\( \frac{\alpha * (R - L)}{\alpha + \beta}\)</mean>
            <variance>\( \frac{(R-L)*(R-L)*\alpha*\beta}{(\alpha+\beta)^2*(\alpha+\beta+1)} \)</variance>
            <family>exponential</family>
            <cite>mcdonald1995generalization</cite>
        </distribution>

        <distribution id="inverse beta">
            <name>inverse beta distribution</name>
            <type>continuous</type>
            <model>The inverse beta distribution, as the name suggests, is the inverse probability distribution of a Beta-distributed variable. </model>
            <parameter>\(\alpha \in (0, \infty)\), the left shape parameter</parameter>
            <parameter>\(\beta \in (0, \infty)\), the right shape parameter</parameter>
            <support>\((0, 1)\)</support>
            <family>exponential</family>
            <cite>mcdonald1995generalization</cite>
        </distribution>

        <distribution id="binomial">
            <name>binomial distribution</name>
            <type>discrete</type>
            <model>The binomial distribution models the number of successes in a fixed number of independent trials each with the same probability of success.</model>
            <parameter>\(n \in \{1, 2, \ldots\}\), the number of trials</parameter>
            <parameter>\(p \in [0, 1]\), the probability of success</parameter>
            <standard>\( n = 1, \; p = \frac{1}{2} \)</standard>
            <support>\(\{0, 1, \ldots, n\}\)</support>
            <pdf>\(f(x) = {n \choose x} p^x (1 - p)^{n - x}, \; x \in \{0, 1, \ldots, n\}\)</pdf>
            <mode>\(\lfloor (n + 1) p \rfloor\)</mode>
            <cdf>\(F(x) = B(1 - p; n - x, x + 1), \; x \in \{0, 1, \ldots, n\}\) where \(B\) is the incomplete beta function</cdf>
            <qf>\(Q(r) = F^{-1}(r), \; r \in [0, 1]\) where \(F\) is the distribution function</qf>
            <pgf>\(G(t) = (1 - p + p t)^n, \; t \in (-\infty, \infty)\)</pgf>
            <mgf>\(M(t) = (1 - p + p e^t)^n, \; t \in (-\infty, \infty)\)</mgf>
            <cf>\(\varphi(t) = (1 - p + p e^{i t})^n, \; t \in (-\infty, \infty)\)</cf>
            <mean>\(n p\)</mean>
            <variance>\(n p (1 - p)\)</variance>
            <skew>\(\frac{1 - 2 p}{\sqrt{n p (1 - p)}}\)</skew>
            <kurt>\(\frac{1 - 6 p (1 - p)}{n p (1 - p)}\)</kurt>
            <entropy>\(\frac{1}{2} \log_2[2 \pi e n p (1 - p)] + O\left(\frac{1}{n}\right)\)</entropy>
            <median>\(Q\left(\frac{1}{2}\right)\) where \(Q\) is the quantile function</median>
            <q1>\(Q\left(\frac{1}{4}\right)\) where \(Q\) is the quantile function</q1>
            <q3>\(Q\left(\frac{3}{4}\right)\) where \(Q\) is the quantile function</q3>
            <family>power series </family>
            <family>exponential</family>
            <history>The binomial distribution is attributed to Jacob Bernoulli</history>
            <cite>altham1978two</cite>
        </distribution>

        <distribution id="beta-binomial">
            <name>beta-binomial distribution</name>
            <type>discrete</type>
            <model>The beta-binomial distribution arises when the success parameter in the binomial distribution is randomized and given a beta distribution.</model>
            <parameter>\(n \in \{1, 2, \ldots\}\), the number of trials</parameter>
            <parameter>\(a \in (0, \infty)\), the left beta parameter</parameter>
            <parameter>\(b \in (0, \infty)\), the right beta parameter</parameter>
            <standard>\( n = 1, \; a = 1, \; b = 1 \)</standard>
            <support>\(\{0, 1, \ldots, n\}\)</support>
            <pdf>\( f(x) = \binom{n}{x} \frac{a^{[x]} b^{[n - x]}}{(a + b)^{[n]}} \), \( x \in \{0, 1, \ldots, n\} \) where \( m^{[j]} \) denotes the rising power of \( m \) of order \( j \)</pdf>
            <cdf>\(F(x) = \sum_0^x f(t), \quad x \in \{0, 1, \ldots, n\}\) where \(f\) is the probability density function</cdf>
            <qf>\(Q(p) = F^{-1}(p), \quad p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <mgf>\(_{2}F_{1}(-n, a; a + b; 1 - e^t) \)</mgf>
            <mean>\(\frac{n a}{a +  b} \)</mean>
            <variance>\(\frac{n a b (a + b + n)}{(a + b)^2 (a + b + 1)}\)</variance>
            <skew>\(\frac{(a + b + 2 n)(b - a)}{(a + b + 2)} \sqrt{\frac{1 + a + b}{n a b (n + a + b)}}\)</skew>
            <kurt>\(\frac{(a + b)^2 (1 + a + b)}{n a b (a + b + 2)(a + b + 3)(a + b + n)} \left[(a + b)(a + b - 1 + 6 n) + 3 a b (n - 2) + 6 n^2 - \frac{3 a b n (6-n)}{a + b} - \frac{18 a b n^2}{(a + b)^2}\right]\)</kurt>
            <median>\(Q\left(\frac{1}{2}\right)\) where \(Q\) is the quantile function</median>
            <q1>\(Q\left(\frac{1}{4}\right)\) where \(Q\) is the quantile function</q1>
            <q3>\(Q\left(\frac{3}{4}\right)\) where \(Q\) is the quantile function</q3>
            <cite>altham1978two</cite>
        </distribution>

        <distribution id="beta-negative binomial">
            <name>beta-negative binomial distribution</name>
            <type>discrete</type>
            <model>The beta-negative binomial distribution arises when the success parameter in the negative binomial distribution is randomized and given a beta distribution.</model>
            <parameter>\(k \in \{1, 2, \ldots\}\), the number of trials</parameter>
            <parameter>\(a \in (0, \infty)\), the left beta parameter</parameter>
            <parameter>\(b \in (0, \infty)\), the right beta parameter</parameter>
            <standard>\( k = 1, \; a = 1, \; b = 1 \)</standard>
            <support>\(\{k, k + 1, \ldots \}\)</support>
            <pdf>\( f(x) = \binom{x - 1}{k - 1} \frac{a^{[k]} b^{[x - k]}}{(a + b)^{[x]}}, \; x \in \{k, k + 1, \ldots\} \), where \( r^{[j]} \) denotes the rising power of order \( j \)</pdf>
            <cdf>\(F(x) = \sum_{t=k}^x f(t), \quad x \in \{k, k + 1, \ldots\}\) where \(f\) is the probability density function</cdf>
            <qf>\(Q(p) = F^{-1}(p), \quad p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <mean>\(k \frac{a + b - 1}{a - 1}\) if \( a \gt 1 \)</mean>
            <variance>\( k \frac{a + b - 1}{(a - 1)(a - 2)}[b + k (a + b - 2)] - k^2 \left(\frac{a + b - 1}{a - 1}\right)^2 \) if \( a \gt 2 \)</variance>
            <median>\(Q\left(\frac{1}{2}\right)\) where \(Q\) is the quantile function</median>
            <q1>\(Q\left(\frac{1}{4}\right)\) where \(Q\) is the quantile function</q1>
            <q3>\(Q\left(\frac{3}{4}\right)\) where \(Q\) is the quantile function</q3>
            <cite>johnson2005univariate</cite>
        </distribution>

        <distribution id="Cauchy">
            <name>Cauchy distribution </name>
            <name>Cauchy-Lorentz distribution </name>
            <name>Lorentz distribution </name>
            <name>Breit-Wigner distribution</name>
            <type>continuous</type>
            <model>The general Cauchy distribution is the location-scale family associated with the standard Cauchy distribution </model>
            <parameter>\(a \in (-\infty, \infty)\), the location parameter</parameter>
            <parameter>\(b \in (0, \infty)\), the scale parameter</parameter>
            <support>\(\displaystyle x \in (-\infty, \infty)\!\)</support>
            <standard>\( a = 0, \; b = 1 \)</standard>
            <pdf>\(\frac{1}{\pi b \, \left[1+\left(\frac{x - a}{b}\right)^2\right]}\!\)</pdf>
            <mode>\( a \)</mode>
            <cdf>\(\frac{1}{\pi}\arctan\left(\frac{x - a}{b} \right) + \frac{1}{2} \)</cdf>
            <qf>\(Q(p) = F^{-1}(p) = a + b \tan \left(\pi (p - \frac{1}{2}) \right), \; p \in (0, 1)\)</qf>
            <mgf>Does not exist</mgf>
            <cf>\(\varphi(t) = \exp(a i t - b |t|), \; t \in (-\infty, \infty)\)</cf>
            <mean>Does not exist</mean>
            <variance>Does not exist</variance>
            <skew>Does not exist</skew>
            <kurt>Does not exist</kurt>
            <entropy>\(\ln (4 \pi b ) \)</entropy>
            <median>\( a \)</median>
            <q1>\(a - b\)</q1>
            <q3>\(a + b\)</q3>
            <family>location</family>
            <family>scale</family>
            <history>The distribution was first used by Simeon Poisson in 1824 and was re-introduced by Augustin Cauchy in 1853. It is also named for Hendrick Lorentz.</history>
            <cite>haas1970inferences</cite>
        </distribution>

        <distribution id="chi-square">
            <name>chi-square distribution </name>
            <name>chi-squared distribution</name>
            <type>continuous</type>
            <model>The chi-square distribution governs the sum of squares of independent standard normal variables.</model>
            <parameter>\(n \in (0, \infty)\), degrees of freedom</parameter>
            <standard>\( n = 1 \)</standard>
            <support>\((0, \infty)\)</support>
            <pdf>\(\frac{1}{2^{\frac{n}{2}}\Gamma\left(\frac{n}{2}\right)}\; x^{\frac{n}{2}-1} e^{-\frac{x}{2}}\,\)</pdf>
            <mode>\(n - 2, \; n \in [2, \infty)\)</mode>
            <cdf>\(\frac{1}{\Gamma\left(\frac{n}{2}\right)}\;\gamma\left(\frac{n}{2},\,\frac{x}{2}\right)\)</cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in [0, 1)\) where \(F\) is the distribution function</qf>
            <mgf>\(M(t) = \frac{1}{(1 - 2 t)^{n/2}}, \; t \in (-\infty, \frac{1}{2})\)</mgf>
            <cf>\(\varphi(t) = \frac{1}{(1 - 2 i t)^{n/2}}, \; t \in (-\infty, \infty)\)</cf>
            <mean>\(n\)</mean>
            <variance>\(2 n\)</variance>
            <skew>\(\sqrt{8 / n}\,\)</skew>
            <kurt>\(12 / n\)</kurt>
            <entropy>\(\frac{n}{2} + \ln[2 \Gamma(n / 2)] + (1 - n / 2) \psi(n / 2)\)</entropy>
            <median>\(\approx n \left(1 - \frac{2}{9n}\right)^3\)</median>
            <q1>\(Q(\frac{1}{4})\) where \(Q\) is the quantile function</q1>
            <q3>\(Q(\frac{3}{4})\) where \(Q\) is the quantile function</q3>
            <family>exponential</family>
            <history>The chi-square distribution was first used by Karl Pearson in 1900.</history>
            <cite>lancaster2005chi</cite>
        </distribution>

        <distribution id="non-central chi-square">
            <name>non-central chi-square distribution</name>
            <name>non-central chi-squared distribution</name>
            <type>continuous</type>
            <model>The non-central chi-square distribution is a generalization of the chi-squared distribution, which arises in the power analysis of statistical tests where the null distribution is asymptotically a chi-squared distribution; important examples of such tests are the likelihood ratio tests.</model>
            <parameter>\(k \in (0, \infty)\), degrees of freedom</parameter>
            <parameter>\(\lambda \in (0, \infty)\), non-centrality parameter</parameter>
            <support>\(x \in [0; +\infty)\,\)</support>
            <pdf>\(\frac{1}{2}e^{-(x+\lambda)/2}\left (\frac{x}{\lambda} \right)^{k/4-1/2}
                I_{k/2-1}(\sqrt{\lambda x})\)</pdf>
            <cdf>\(F(x) = 1 - Q_{\frac{k}{2}} \left( \sqrt{\lambda}, \sqrt{x} \right)\),
                where \(Q_M(a,b)\) is the Marcum Q-function</cdf>
            <mgf>\(\frac{\exp\left(\frac{ \lambda t}{1-2t }\right)}{(1-2 t)^{k/2}}, \) for \( 2t \lt 1\)</mgf>
            <cf>\(\varphi(t) = \frac{\exp\left(\frac{i \lambda t}{1 - 2 i t}\right)}{(1 - 2 i t)^{k/2}}, \; t \in (-\infty, \infty)\)</cf>
            <mean>\(k+\lambda\)</mean>
            <variance>\(2(k+2\lambda)\)</variance>
            <skew>\(\frac{2^{3/2}(k+3\lambda)}{(k+2\lambda)^{3/2}}\)</skew>
            <kurt>\(\frac{12(k+4\lambda)}{(k+2\lambda)^2}\)</kurt>
            <family>exponential</family>
            <cite>sankaran1959non</cite>
        </distribution>

        <distribution id="chi">
            <name>chi distribution</name>
            <type>continuous</type>
            <model>The chi distribution governs the square root of a variable with the chi-square distribution.</model>
            <parameter>\(n \in \{1, 2, \ldots\}\), the degrees of freedom</parameter>
            <support>\(x\in[0;\infty)\)</support>
            <pdf>\(\frac{2^{1-n/2}x^{n-1}e^{-x^2/2}}{\Gamma(n/2)}\)</pdf>
            <mode>\(\sqrt{n-1}\,\) for \(n \ge 1\)</mode>
            <cdf>\(P(n/2,x^2/2)\,\)</cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <moments>\(\mu(k) = \frac{2^{k/2} \Gamma[(n+k)/2]}{\Gamma(n/2)}, \; k \in \{1, 2, \ldots\}\) where \(\Gamma\) is the gamma function</moments>
            <mean>\(\mu=\sqrt{2}\,\frac{\Gamma((n+1)/2)}{\Gamma(n/2)}\)</mean>
            <variance>\(\sigma^2=n-\mu^2\,\)</variance>
            <skew>\(\gamma_1=\frac{\mu}{\sigma^3}\,(1-2\sigma^2)\)</skew>
            <kurt>\(\frac{2}{\sigma^2}(1-\mu\sigma\gamma_1-\sigma^2)\)</kurt>
            <entropy>\(\ln[\Gamma(n/2)] + \frac{1}{2} [n - \ln(2) - (n-1) \psi_0(n/2)]\) where \(\psi_0\) is the polygamma function</entropy>
            <q1>\(Q(\frac{1}{4})\) where \(Q\) is the quantile function</q1>
            <q3>\(Q(\frac{3}{4})\) where \(Q\) is the quantile function</q3>
            <cite>krishnaiah1963note</cite>
        </distribution>

        <distribution id="continuous-uniform">
            <name>continuous uniform distribution </name>
            <name>rectangular distribution</name>
            <type>continuous</type>
            <model>The continuous uniform distribution governs a point chosen at random from an interval. The continuous uniform distribution, aka rectangular distribution, is a family of probability distributions where all intervals of the same length on the distribution's support are equally probable. The support is defined by the two parameters, a and b, which are its minimum and maximum values. It is the maximum entropy probability distribution for a random variate X under no constraint other than that it is contained in the distribution's support.</model>
            <parameter>\(a \in (-\infty, \infty)\), location, the left endpoint</parameter>
            <parameter>\( w \in (0, \infty) \), scale, the width of the interval</parameter>
            <parameter>\(b = a + w\), the right endpoint</parameter>
            <standard>\( a = 0, \; w = 1 \)</standard>
            <support>\([a, b]\)</support>
            <pdf>\(f(x) = \frac{1}{b - a}, \; x \in [a, b]\)</pdf>
            <mode> all \(x \in [a, b]\)</mode>
            <cdf>\(F(x) = \frac{x - a}{b - a}, \; x \in [a, b]\)</cdf>
            <qf>\(Q(p) = a + p (b - a), \; p \in [0, 1]\)</qf>
            <mgf>\(M(t) = \frac{e^{t b} - e^{t a}}{t (b - a)}, \; t \in (-\infty, \infty)\)</mgf>
            <cf>\(\varphi(t) = \frac{e^{i t b} - e^{i t a}}{i t (b - a)}, \; t \in (-\infty, \infty)\)</cf>
            <moments type="raw">\(\mu(t) = \frac{b^{t+1} - a^{t+1}}{(t + 1)(b - a)}, \; t \in (0, \infty)\)</moments>
            <mean>\(\frac{1}{2}(a + b)\)</mean>
            <variance>\(\frac{1}{12} (b - a)^2\)</variance>
            <skew>\(0\)</skew>
            <kurt>\(-\frac{6}{5}\)</kurt>
            <entropy>\(\ln(b - a)\)</entropy>
            <median>\(\frac{1}{2}(a + b)\)</median>
            <q1>\(\frac{3}{4} a + \frac{1}{4}b\)</q1>
            <q3>\(\frac{1}{4} a + \frac{3}{4} b\)</q3>
            <family>location </family>
            <family>scale</family>
            <cite>kuipers2006uniform</cite>
        </distribution>

        <distribution id="discrete-uniform">
            <name>discrete uniform distribution</name>
            <type>discrete</type>
            <model>The discrete uniform distribution governs a point chosen at random from a discrete interval.</model>
            <parameter>\(a \in (-\infty, \infty)\), location, the left endpoint</parameter>
            <parameter>\( h \in (0, \infty) \), scale, the step size</parameter>
            <parameter>\( n \in \{1, 2, \ldots\} \), the number of points</parameter>
            <parameter>\(b = a + (n - 1) h\), the right endpoint</parameter>
            <standard>\( a = 0, \; h = 1 \)</standard>
            <support>\(\{a, a + h, \ldots, a + (n - 1) h\}\)</support>
            <pdf>\( f(x) = \frac{1}{n}, \; x \in \{a, a + h, \ldots, a + (n - 1) h\}\)</pdf>
            <mode>\(x \in \{a, a + h, \ldots, a + (n - 1) h\}\)</mode>
            <cdf>\(F(x) = \frac{1}{n}\left(\left \lfloor\ \frac{x - a}{h} + 1\right \rfloor \right), \; x \in [a, b] \)</cdf>
            <qf>\(Q(p) = a + \left(\lceil n p \rceil - 1\right) h, \; p \in (0, 1] \)</qf>
            <mgf>\(M(t) = \frac{1}{n} e^{t a} \frac{1 - e^{n t h}}{1 - e^{t h}}, \; t \in (-\infty, \infty)\)</mgf>
            <mean>\(\frac{1}{2}(a + b) = a + \frac{1}{2}(n - 1)h\)</mean>
            <variance>\(\frac{1}{12}(n^2 - 1) h^2 = \frac{1}{12}(b - a)(b - a + h)\)</variance>
            <skew>\(0\)</skew>
            <kurt>\(-\frac{6(n^2 + 1)}{5(n^2 - 1)}\,\)</kurt>
            <entropy>\(\ln(n)\,\)</entropy>
            <median>\(Q\left(\frac{1}{2}\right) \) where \( Q \) is the quantile function</median>
            <q1>\(Q\left(\frac{1}{4}\right)\) where \( Q \) is the quantile function</q1>
            <q3>\(Q\left(\frac{3}{4}\right)\) where \( Q \) is the quantile function</q3>
            <family>location</family>
            <family>scale</family>
            <cite>freund1967modern</cite>
        </distribution>

        <distribution id="exponential">
            <name>exponential distribution</name>
            <name>negative exponential distribution</name>
            <type>continuous</type>
            <model>The exponential distribution models the time between random points in the Poisson model.</model>
            <parameter>\(r \in (0, \infty)\), rate</parameter>
            <parameter>\( b = 1 / r \), scale</parameter>
            <standard>\( r = 1 \)</standard>
            <support>\([0, \infty)\)</support>
            <pdf>\(f(x) = r e^{-r x}, \; x \in [0, \infty)\)</pdf>
            <mode>\(0\)</mode>
            <cdf>\(F(x) = 1 - e^{-r x}, \; x \in [0, \infty)\)</cdf>
            <qf>\(Q(p) = \frac{- \ln(1 - p)}{r}, \; p \in [0, 1)\)</qf>
            <mgf>\(M(t) = \left(1 - \frac{t}{r}\right)^{-1} = \frac{r}{r - t}, \; t \in (-\infty, r)\)</mgf>
            <cf>\(\frac{r}{r - i t}, \; t \in (-\infty, \infty)\)</cf>
            <mean>\(\frac{1}{r}\)</mean>
            <variance>\(\frac{1}{r^2}\)</variance>
            <skew>\(2\)</skew>
            <kurt>\(6\)</kurt>
            <entropy>\(1 - \ln(r)\)</entropy>
            <median>\(\frac{\ln(2)}{r}\)</median>
            <q1>\(\frac{\ln(4) - \ln(3)}{r}\)</q1>
            <q3>\(\frac{\ln(3)}{r}\)</q3>
            <family>exponential </family>
            <family>scale</family>
            <history>The exponential distribution was named by Karl Pearson in 1895.</history>
            <cite>siegrist2007exponential</cite>
        </distribution>

        <distribution id="exponential-logarithmic">
            <name>exponential-logarithmic distribution</name>
            <type>continuous</type>
            <model>The exponential-logarithmic distribution models failure times of devices with decreasing failure rate.</model>
            <parameter>\(p \in (0, 1)\), the shape parameter</parameter>
            <parameter>\(b \in (0, \infty)\), the scale parameter</parameter>
            <standard>\( p = \frac{1}{2}, \; b = 1 \)</standard>
            <support>\([0,\infty)\)</support>
            <pdf>\(f(x) = \frac{1}{-\ln p} \frac{\frac{1}{b}(1 - p) e^{-x/b}}{1 - (1 - p) e^{-x/b}}, \; x \in [0, \infty)\)</pdf>
            <mode>\(0\)</mode>
            <cdf>\(F(x) = 1 - \frac{\ln(1 - (1 - p) e^{-x/b})}{\ln p}, \; x \in [0, \infty)\)</cdf>
            <qf>\(Q(u) = b \ln\left(\frac{1 - p}{1 - p^{1 - u}}\right), \; u \in (0, 1)\)</qf>
            <moments type="raw">\(\mu(n) = -n! b^n \frac{L_{n+1}(1 - p)}{\ln(p)}, \; n \in \{0, 1, \ldots\}\) where \(L_{n+1}\) is the polylog function of order \(n + 1\)</moments>
            <mean>\(-b \frac{L_2(1 - p)}{\ln(p)}\) where \(L_2\) is the polylog function of order \(2\).</mean>
            <variance>\(-b^2 \frac{2 L_3(1 - p)}{\ln(p)} - b^2 \frac{L_2^2(1 - p)}{\ln^2(p)}\) where \(L_n\) is the polylog function of order \(n\)</variance>
            <median>\(b \ln(1 + \sqrt{p})\)</median>
            <q1>\(b \ln\left(\frac{1 - p}{1 - p^{3/4}}\right)\)</q1>
            <q3>\(b \ln\left(\frac{1 - p}{1 - p^{1/4}}\right)\)</q3>
            <family>scale</family>
            <cite>tahmasbi2008two</cite>
        </distribution>

        <distribution id="exponential-power">
            <name>exponential power distribution </name>
            <name>generalized error distribution</name>
            <type>continuous</type>
            <model>The exponential power distribution is a family of symmetric, unimodal distributions that generalizes the normal and Laplace families.</model>
            <parameter>\(\mu \in (-\infty, \infty)\), the location parameter</parameter>
            <parameter>\(\alpha \in (0, \infty)\), the scale parameter</parameter>
            <parameter>\(\beta \in (0, \infty)\), the shape parameter</parameter>
            <support>\(x \in (-\infty; +\infty)\!\)</support>
            <pdf>\(f(x) = \frac{\beta}{2 \alpha \Gamma(1/\beta)} \exp\left[-\left(\frac{|x - \mu|}{\alpha}\right)^\beta\right], \; x \in (-\infty, \infty)\) where \(\Gamma\) is the gamma function</pdf>
            <mode>\(\mu\)</mode>
            <cdf>\(F(x) = \frac{1}{2} + \frac{\sgn(x - \mu)}{2 \Gamma (1 / \beta)} \gamma\left[\frac{1}{\beta}, \left(\frac{|x - \mu|}{\alpha}\right)^\beta\right], \; x \in (-\infty, \infty)\), where \(\Gamma\) is the gamma function and \(\gamma\) is the lower incomplete gamma function</cdf>
            <qf>\(Q(p) = F^{-1}(p), \quad p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <mean>\(\mu\)</mean>
            <variance>\(\frac{\alpha^2 \Gamma(3/\beta)}{\Gamma(1/\beta)}\) where \(\Gamma\) is the gamma function</variance>
            <skew>\(0\)</skew>
            <kurt>\(\frac{\Gamma(5/\beta) \Gamma(1/\beta)}{\Gamma^2(3/\beta)} - 3\) where \(\Gamma\) is the gamma function</kurt>
            <entropy>\(\frac{1}{\beta} - \log\left[\frac{\beta}{2 \alpha \Gamma(1/\beta)}\right]\) where \(\Gamma\) is the gamma function</entropy>
            <median>\(\mu\)</median>
            <q1>\(Q(\frac{1}{4})\) where \(Q\) is the quantile function</q1>
            <q3>\(Q(\frac{3}{4})\) where \(Q\) is the quantile function</q3>
            <family>location </family>
            <family>scale</family>
            <cite>zhu2009properties</cite>
        </distribution>

       <distribution id="F">
            <name>F-distribution </name>
            <name>Snedecor's F-distribution </name>
            <name>Fisher-Snedecor distribution</name>
            <type>continuous</type>
            <model>The F-distribution governs the ratio of independent, scaled chi-square variables.</model>
            <parameter>\(n \in (0, \infty)\), numerator degrees of freedom</parameter>
            <parameter>\(d \in (0, \infty)\), denominator degrees of freedom</parameter>
            <standard>\( n = 1, \; d = 1 \)</standard>
            <support>\([0, \infty)\)</support>
            <pdf>\( f(x) = \frac{\Gamma[(n + d)/2]}{\Gamma(n/2) \Gamma(d/2)} \left(\frac{n}{d}\right)^{n/2} \frac{x^{(n-2)/2}}{[1 + (n/d) x]^{(n+d)/2}}, \; x \in [0, \infty) \)</pdf>
            <mode>\(\frac{n - 2}{n} \frac{d}{d + 2} \) for \(n \gt 2\)</mode>
            <cdf>\(F(x) = \frac{B[n x / (n x + d), n/2, d/2]}{B(n/2, d/2)}, \; x \in [0, \infty)\), where \( B \) is the beta function</cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <mgf>Does not exist</mgf>
            <mean>\(\frac{d}{d - 2}\) for \(d \gt 2\)</mean>
            <variance>\(\frac{2 d (n + d - 2)}{n (d - 2)^2 (d - 4)}\) for \(d \gt 4\)</variance>
            <skew>\(\frac{(2 n + d - 2) \sqrt{8(d - 4)}}{(d - 6)\sqrt{n (n + d - 2)}}\) for \(d \gt 6 \)</skew>
            <kurt>\(\frac{20 d - 8 d^2 + d^3 + 44 n - 32 n d + 5 n d^2 - 22 n^2 - 5 n^2 d - 16}{n (d - 6)(d - 8)(n + d - 2)/12}\) for \( d \gt 8 \)</kurt>
            <median> \(Q\left(\frac{1}{2}\right)\) where \(Q\) is the quantile function</median>
            <q1>\(Q\left(\frac{1}{4}\right)\) where \(Q\) is the quantile function</q1>
            <q3>\(Q\left(\frac{3}{4}\right)\) where \(Q\) is the quantile function</q3>
            <history>The \(F\)-distribution was first derived by George Snedecor in 1934.  The letter F was chosen as a tribute to Ronald Fisher.</history>
            <cite>johnson2005univariate</cite>
        </distribution>

        <distribution id="gamma">
            <name>gamma distribution</name>
            <type>continuous</type>
            <model>The gamma distribution governs the arrival times in the Poisson model, and has many applications in statistics.</model>
            <parameter>\(k \in (0, \infty)\), the shape parameter</parameter>
            <parameter>\(b \in (0, \infty)\), the scale parameter</parameter>
            <standard>\( k = 1, \; b = 1 \)</standard>
            <support>\((0,\,\infty) \)</support>
            <pdf>\(f(x) = \frac{1}{\Gamma(k) b^k} x^{k - 1}e^{-x / b}, \; x \in (0, \infty) \) where \( \Gamma \) is the gamma function</pdf>
            <mode>\((k - 1) b \) for \( k \gt 1 \)</mode>
            <cdf>\( F(x) = \frac{1}{\Gamma(k)} \gamma\left(k, \frac{x}{b}\right) \) where \( \gamma \) is the incomplete gamma function</cdf>
            <qf>\(Q(p) = F^{-1}(p)\) where \(F\) is the distribution function</qf>
            <mgf>\(M(t) = \frac{1}{(1 - b t)^k}, \; t \in (-\infty, 1 / b)\)</mgf>
            <cf>\(\varphi(t) = \frac{1}{(1 - i b t)^k}, \; t \in (-\infty, \infty)\)</cf>
            <mean>\( k b\)</mean>
            <variance>\( k b^2 \)</variance>
            <skew>\( \frac{2}{\sqrt{k}} \)</skew>
            <kurt>\( \frac{6}{k} \)</kurt>
            <entropy>\(k + \ln(b) + \ln \Gamma(k) + (1 - k) \psi(k)\) where \(\psi\) is the digamma function</entropy>
            <median>\(Q\left(\frac{1}{2}\right)\) where \(Q\) is the quantile function</median>
            <q1> \(Q\left(\frac{1}{4}\right)\) where \(Q\) is the quantile function</q1>
            <q3>\(Q\left(\frac{3}{4}\right)\) where \(Q\) is the quantile function</q3>
            <family>scale </family>
            <family>exponential</family>
            <cite>siegrist2007exponential</cite>
        </distribution>

        <distribution id="geometric">
            <name>geometric distribution</name>
            <type>discrete</type>
            <model>The geometric distribution models the trial number of the first success in a sequence of Bernoulli trials.</model>
            <parameter>\(p \in (0, 1]\), the success parameter</parameter>
            <standard>\( p = \frac{1}{2} \)</standard>
            <support>\(\{1, 2, 3, \ldots\}\)</support>
            <pdf>\(f(k) = p (1 - p)^{k - 1}, \; k \in \{1, 2, \ldots\}\)</pdf>
            <mode>\(1\)</mode>
            <cdf>\( F(k) = 1 - (1 - p)^k \)</cdf>
            <qf>\(Q(u) = \left\lceil \frac{\ln(1 - u)}{\ln(1 - p)} \right\rceil, \; u \in [0, 1)\)</qf>
            <pgf>\(P(t) = \frac{p t}{1 - (1 - p)t}, \; t \in \left(-\frac{1}{1 - p}, \frac{1}{1 - p}\right)\)</pgf>
            <mgf>\(M(t) = \frac{p e^t}{1 - (1 - p) e^t}, \; t \in (-\infty, -\ln(1 - p))\)</mgf>
            <cf>\(\varphi(t) = \frac{p e^{i t}}{1 - (1 - p) e^{i t}}, \; t \in (-\infty, \infty)\)</cf>
            <mean>\(\frac{1}{p}\)</mean>
            <variance>\(\frac{1 - p}{p^2}\)</variance>
            <skew>\(\frac{2 - p}{\sqrt{1 - p}}\!\)</skew>
            <kurt>\(6 + \frac{p^2}{1 - p}\)</kurt>
            <entropy>\(\frac{-(1 - p) \log_2 (1 - p) - p \log_2 p}{p} \)</entropy>
            <median>\(\left\lceil \frac{-\ln(2)}{\ln(1 - p)} \right\rceil\)</median>
            <q1>\(\left\lceil \frac{\ln(3) - \ln(4)}{\ln(1 - p)} \right\rceil\)</q1>
            <q3>\(\left\lceil \frac{-\ln(4)}{\ln(1 - p)} \right\rceil\)</q3>
            <family>power series </family>
            <family>exponential</family>
            <history>The geometric distribution was used very early in the history of probability, but the name has been attributed to William Feller in 1950.</history>
            <cite>philippou1983generalized</cite>
        </distribution>
        
        <distribution id="hypergeometric">
            <name>hypergeometric distribution</name>
            <type>discrete</type>
            <model>The hypergeometric distribution governs the number of objects of a given type when sampling without replacement from a multi-type population.</model>
            <parameter>\(N\), the population size</parameter>
            <parameter>\(m\), the number of type 1 objects in the population</parameter>
            <parameter>\(n\), the sample size</parameter>
            <support>\( \left\{\max{(0,\, n+m-N)},\, \dots,\, \min{(m,\, n )}\right\}\)</support>
            <pdf>\( f(x) = \frac{\binom{m}{x} \binom{N - m}{n - x}}{\binom{N}{n}}, \; x \in \left\{\max{(0,\, n+m-N)},\, \dots,\, \min{(m,\, n )}\right\} \)</pdf>
            <mode>\(\left \lfloor \frac{(n + 1)(m + 1)}{N + 2} \right \rfloor\)</mode>
            <cdf>\(1-{{{n \choose {k+1}}{{N-n} \choose {m-k-1}}}\over {N \choose m}} \,_3F_2\!\!\left[\begin{array}{c}1,\ k+1-m,\ k+1-n \\ k+2,\ N+k+2-m-n\end{array};1\right]\)</cdf>
            <mgf>\(\frac{{N-m \choose n} \scriptstyle{\,_2F_1(-n, -m; N - m - n + 1; e^{t}) } }
                {{N \choose n}}  \,\!\)</mgf>
            <mean>\(n \frac{m}{N}\)</mean>
            <variance>\( n \frac{m}{N} \frac{N - m}{N} \frac{N - n}{N - 1} \)</variance>
            <skew>\(\frac{(N - 2 m)\sqrt{N - 1}(N - 2 n)}{\sqrt{n m(N - m)(N - n)}(N - 2)}\)</skew>
            <kurt>\(\left[ \frac{N^2 (N-1)}{n(N - 2)(N - 3)(N - n)}\right] \left[ \frac{N(N+1) - 6 N(N - n)}{m (N - m)} + \frac{3 n (N - n)(N + 6)}{N^2} - 6 \right]\)</kurt>
            <history>The hypergeometric distribution is very old, and was used by Jacob Bernoulli, Abraham DeMoivre, and others. The name was coined by H.T. Gonin in 1936.</history>
            <cite>harkness1965properties</cite>
        </distribution>

        <distribution id="hyperbolic-secant">
            <name>hyperbolic secant distribution</name>
            <type>continuous</type>
            <model>The hyperbolic secant distribution is a symmetric, unimodal distribution but with larger kurtosis than the normal distribution.</model>
            <parameter>\( \mu \in (-\infty, \infty) \), the location parameter</parameter>
            <parameter>\( \sigma \in (0, \infty) \), the scale parameter</parameter>
            <standard>\( \mu = 0, \; \sigma = 1 \)</standard>
            <support>\((-\infty, \infty)\)</support>
            <pdf>\( f(x) = \frac{1}{2 \sigma} \sech\left[\frac{\pi}{2}\left(\frac{x - \mu}{\sigma}\right)\right], \; x \in (-\infty, \infty) \)</pdf>
            <mode>\(\mu\)</mode>
            <cdf>\( F(x) = \frac{2}{\pi} \arctan\left\{\exp\left[\frac{\pi}{2} \left(\frac{x - \mu}{\sigma} \right) \right]\right\}, \; x \in (-\infty, \infty) \)</cdf>
            <qf>\(Q(p) = \mu + \sigma \frac{2}{\pi} \ln[\tan(\frac{\pi}{2} p)], \; p \in (0, 1)\)</qf>
            <mgf>\(M(t) = e^{\mu t} \sec(\sigma t), \; t \in (-\frac{\pi}{2 \sigma}, \frac{\pi}{2 \sigma})\)</mgf>
            <mean>\(\mu\)</mean>
            <variance>\(\sigma^2\)</variance>
            <skew>\(0\)</skew>
            <kurt>\(2\)</kurt>
            <median>\(\mu\)</median>
            <q1>\( \mu + \sigma \frac{2}{\pi} \ln(\sqrt{2} - 1)\)</q1>
            <q3>\( \mu + \sigma \frac{2}{\pi} \ln(\sqrt{2} + 1)\)</q3>
            <family>location</family>
            <family>scale</family>
            <cite>harkness1968generalized</cite>
        </distribution>

        <distribution id="Irwin-Hall">
            <name>Irwin-Hall distribution</name>
            <type>continuous</type>
            <model>The Irwin-Hall distribution governs the sum of \(n\) independent variables, each uniformly distributed on \([0, 1]\).</model>
            <parameter>\(n \in \{1, 2, \ldots\}\), the number of terms</parameter>
            <standard>\( n = 1 \)</standard>
            <support>\([0, n]\)</support>
            <pdf>\(f(x) = \frac{1}{2 (n - 1)!} \sum_{k=0}^n (-1)^k \binom{n}{k}\sgn(x - k)(x - k)^{n-1}, \; x \in [0, n]\)</pdf>
            <mode>\( n/2 \) for \( n \ge 2 \)</mode>
            <cdf>\( F(x) = \frac{1}{2} + \frac{1}{2 n!} \sum_{k=0}^n (-1)^k \binom{n}{k} \sgn(x - k) (x - k)^n, \; x \in [0, n]\)</cdf>
            <mgf>\(M(t) = \left(\frac{e^t - 1}{t}\right)^n, \; t \in (-\infty, \infty)\)</mgf>
            <mean>\(\frac{n}{2}\)</mean>
            <variance>\(\frac{n}{12}\)</variance>
            <median>\(\frac{n}{2}\)</median>
            <history>The Irwin-Hall distribution is named for Joseph Irwin and Phillip Hall who independently analyzed the distribution in 1927.</history>
            <cite>hall1927distribution</cite>
        </distribution>

        <distribution id="inverted-beta">
            <name>inverted beta distribution </name>
            <name>beta prime distribution </name>
            <name>beta distribution of the second kind</name>
            <type>continuous</type>
            <model>The inverted beta distribution is conjugate for the odds in the Bernoulli distribution </model>
            <parameter>\(\alpha \in (0, \infty)\), the first shape parameter</parameter>
            <parameter>\(\beta \in (0, \infty)\), the second shape parameter</parameter>
            <standard>\( \alpha = 1, \; \beta = 1 \)</standard>
            <support>\((0, \infty)\)</support>
            <pdf>\(f(x) = \frac{x^{\alpha-1} (1+x)^{-\alpha -\beta}}{B(\alpha,\beta)}\!\)</pdf>
            <mode>\(\frac{\alpha - 1}{\beta + 1}\) if \(\alpha \in [1, \infty)\)</mode>
            <cdf>\(F(x) = \int_0^x f(t) dt, \; x \in (0, \infty)\) where \(f\) is the probability density function</cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <mean>\(\frac{\alpha}{\beta - 1}\) if \(\beta \in (1, \infty)\)</mean>
            <variance>\(\frac{\alpha (\alpha + \beta - 1)}{(\beta - 2)(\beta - 1)^2}\) if \(\beta \in (2, \infty)\)</variance>
            <median>\(Q\left(\frac{1}{2}\right)\) where \(Q\) is the quantile function</median>
            <q1>\(Q\left(\frac{1}{4}\right)\) where \(Q\) is the quantile function</q1>
            <q3>\(Q\left(\frac{3}{4}\right)\) where \(Q\) is the quantile function</q3>
            <cite>mcdonald1995generalization</cite>
        </distribution>

        <distribution id="Laplace">
            <name>Laplace distribution </name>
            <name>double exponential distribution</name>
            <type>continuous</type>
            <model>The Laplace distribution is a symmetric, unimodal distribution with tails that are fatter than those of the normal distribution </model>
            <parameter>\(a \in (-\infty, \infty)\), location</parameter>
            <parameter>\(b \in (0, \infty)\), scale</parameter>
            <standard>\( a = 0, \; b = 1 \)</standard>
            <support>\( (-\infty, \infty) \)</support>
            <pdf>\(f(x) = \frac{1}{2 b} \exp \left(-\frac{\left|x - a\right|}{b} \right), \; x \in (-\infty, \infty) \)</pdf>
            <mode>\( a \)</mode>
            <cdf>\( F(x) = \begin{cases} \frac{1}{2} \exp\left(\frac{x - a}{b}\right), &amp; x \in (-\infty, a] \\ 1 - \frac{1}{2} \exp\left(-\frac{x - a}{b}\right), &amp; x \in [a, \infty) \end{cases} \)</cdf>
            <qf>\(Q(p) = a + b \ln(2 \min\{p, 1 - p\}), \; p \in (0, 1)\)</qf>
            <mgf>\(M(t) = \frac{e^{a t}}{1 - b^2 t^2}, \; t \in (-\frac{1}{b}, \frac{1}{b})\)</mgf>
            <cf>\(\varphi(t) = \frac{e^{a i t}}{1 + b^2 t^2}, \; t \in (-\infty, \infty)\)</cf>
            <mean>\( a \)</mean>
            <variance>\(2 b^2\)</variance>
            <skew>\( 0 \)</skew>
            <kurt>\( 3 \)</kurt>
            <entropy>\(\log(2 e b)\)</entropy>
            <median>\( a \)</median>
            <q1>\(a - b \ln(2)\)</q1>
            <q3>\(a + b \ln(2)\)</q3>
            <family>location</family>
            <history>The Laplace distribution is named for Pierre Simon Laplace.</history>
            <cite>kotz2001laplace</cite>
        </distribution>

        <distribution id="Levy">
            <name>Levy distribution </name>
            <name>van der Waals profile</name>
            <name>stable distribution</name>
            <type>continuous</type>
            <model>The Levy distribution is a stable distribution that has applications in spectroscopy. </model>
            <parameter>\(a \in (-\infty, \infty)\), the location parameter</parameter>
            <parameter>\(b \in (0, \infty)\), the scale parameter</parameter>
            <standard>\( a = 0, \; b = 1 \)</standard>
            <support>\((a, \infty)\)</support>
            <pdf>\( f(x) = \sqrt{\frac{b}{2 \pi}} \frac{1}{(x - a)^{3/2}} \exp\left[-\frac{b}{2 (x - a)}\right], \; x \in (a, \infty)\)</pdf>
            <mode>\( a + \frac{1}{3} b\)</mode>
            <cdf>\( F(x) = 2 \left[1 - \Phi\left(\sqrt{\frac{b}{x - a}}\right)\right], \; x \in (a, \infty) \) where \( \Phi \) is the standard normal distribution function</cdf>
            <qf>\( F^{-1}(p) = a + \frac{b}{\left[\Phi^{-1}(1 - p / 2)\right]^2}, \; p \in [0, 1) \) where \( \Phi^{-1} \) is the standard normal quantile function</qf>
            <cf>\(\varphi(t) = \exp\left(i a t - \sqrt{-2 i b t}\right), \; t \in (-\infty, \infty)\)</cf>
            <mean>\(\infty\)</mean>
            <variance>\(\infty\)</variance>
            <skew>undefined</skew>
            <kurt>undefined</kurt>
            <entropy>\(\frac{1}{2}[1 + 3 \gamma + \ln(16 \pi b^2)]\) where \(\gamma\) is Euler's constant</entropy>
            <median>\( a + b \left[\Phi^{-1}\left(\frac{3}{4}\right)\right]^{-2}\) where \(\Phi^{-1}\) is the standard normal quantile function</median>
            <q1>\( a + b \left[\Phi^{-1}\left(\frac{7}{8}\right)\right]^{-2}\) where \(\Phi^{-1}\) is the standard normal quantile function</q1>
            <q3>\( a + b \left[\Phi^{-1}\left(\frac{5}{8}\right)\right]^{-2}\) where \(\Phi^{-1}\) is the standard normal quantile function</q3>
            <family>location </family>
            <family>scale </family>
            <family>stable</family>
            <history>The Levy distribution is named for Paul Pierre Levy.</history>
            <cite>barndorff2001levy</cite>
        </distribution>

        <distribution id="Landau">
            <name>Landau distribution </name>
            <type>continuous</type>
            <model>The Landau distribution is used in physics to describe the fluctuations in the energy
                loss of a charged particle passing through a thin layer of matter. This distribution is a
                special case of the stable Levy distribution with parameters (1, 1).</model>
            <parameter>\(\mu \in (-\infty, \infty)\), the location parameter</parameter>
            <parameter>\(c \in (0, \infty)\), the scale parameter</parameter>
            <support>\((1, \infty)\)</support>
            <pdf>\(f(x) = \sqrt{\frac{1}{2 \pi}} \frac{1}{(x - 1)^{3/2}} \exp\left[-\frac{1}{2 (x - 1)}\right], \;
                x \in (1, \infty)\)</pdf>
            <mode>\(1 + \frac{1}{3}\)</mode>
            <cdf>\(F(x) = \int_1^x f(t) dt, \; x \in (1, \infty)\) where \(f\) is the probability density function</cdf>
            <cite>landau1944energy</cite>
        </distribution>

        <distribution id="logarithmic">
            <name>logarithmic distribution </name>
            <name>logarithmic series distribution </name>
            <name>log-series distribution</name>
            <type>discrete</type>
            <model>The logarithmic distribution is sometimes used to model relative species abundance.</model>
            <parameter>\(p \in (0, 1)\), the shape parameter</parameter>
            <standard>\( p = \frac{1}{2} \)</standard>
            <support>\(\{1, 2, 3, \ldots\}\)</support>
            <pdf>\(f(k) = \frac{-1}{\ln(1 - p)} \frac{p^k}{k}, \; k \in \{1, 2, \ldots\}\)</pdf>
            <mode>\(1\)</mode>
            <cdf>\(F(k) = 1 + \frac{B(p; k + 1, 0)}{\ln(1 - p)}, \; k \in \{1, 2, \ldots\} \) where \( B \) is the incomplete beta function</cdf>
            <pgf>\(G(t) = \frac{\ln(1 - p t)}{\ln(1 - p)}, \; t \in (-\frac{1}{p}, \frac{1}{p})\)</pgf>
            <mgf>\(M(t) = \frac{\ln(1 - p e^t)}{\ln(1 - p)}, \; t \in (-\infty, -\ln(p))\)</mgf>
            <cf>\(\varphi(t) = \frac{\ln(1 - p e^{i t})}{\ln(1 - p)}, \; t \in (-\infty, \infty)\)</cf>
            <mean>\(\frac{-1}{\ln(1 - p)} \frac{p}{1 - p}\!\)</mean>
            <variance>\(-p \frac{p + \ln(1 - p)}{(1 - p)^2 \ln^2(1 - p)}\!\)</variance>
            <family>power series</family>
            <history>The logarithmic distribution was first derived by Ronald Fisher in 1943.</history>
            <cite>fisher1943relation</cite>
        </distribution>

        <distribution id="logistic">
            <name>logistic distribution</name>
            <type>continuous</type>
            <model>The logistic distribution occurs in logistic regression.</model>
            <parameter>\(a \in (-\infty, \infty)\), the location parameter</parameter>
            <parameter>\(b \in (0, \infty)\), the scale parameter</parameter>
            <standard>\( a = 0, \; b = 1 \)</standard>
            <support>\((-\infty, \infty)\)</support>
            <pdf>\(f(x) = \frac{e^{-(x - a)/b}}{b \left(1 + e^{-(x - a)/b}\right)^2}, \; x \in (-\infty, \infty)\)</pdf>
            <mode>\( a \)</mode>
            <cdf>\(F(x) = \frac{1}{1 + e^{-(x - a)/b}}, \; x \in (-\infty, \infty)\)</cdf>
            <qf>\(Q(p) = a + b \ln\left(\frac{p}{1 - p}\right), \; p \in (0, 1)\)</qf>
            <mgf>\(M(t) = e^{a t} B(1 - b t, 1 + b t)\) where \(B\) is the beta function</mgf>
            <mean>\( a \)</mean>
            <variance>\( \frac{\pi^2}{3} b^2 \)</variance>
            <skew>\( 0 \)</skew>
            <kurt>\( 6/5 \)</kurt>
            <entropy>\(\ln(b) + 2\)</entropy>
            <median>\( a \)</median>
            <q1>\(a - \ln(3) b\)</q1>
            <q3>\(a + \ln(3) b\)</q3>
            <family>location</family>
            <family>scale</family>
            <history>Logistic regression was first used by D.R. Cox in 1958.</history>
            <cite>balakrishnan1992handbook</cite>
        </distribution>

        <distribution id="generalized-logistic">
            <name>generalized logistic distribution</name>
            <name>skew logistic distribution</name>
            <type>continuous</type>
            <model>The  generalized logistic distribution represents several different families of
                probability distributions. One family is called the skew-logistic distribution.
                Other families of distributions that have also been called generalized
                logistic distributions include the shifted log-logistic distribution,
                which is a generalization of the log-logistic distribution.</model>
            <parameter>\(\alpha >0\), the location parameter</parameter>
            <parameter>\(\beta >0\), the scale parameter</parameter>
            <support>\((-\infty, \infty)\)</support>
            <pdf>\(f(x;\alpha,\beta)=\frac{1}{B(\alpha,\beta)}\frac{\exp(-\beta x)}
                {(1+\exp(-x))^{\alpha+\beta}}\)</pdf>
            <cite>balakrishnan2009continuous</cite>
        </distribution>

        <distribution id="log-normal">
            <name>log-normal distribution </name>
            <name>log normal distribution </name>
            <name>lognormal distribution </name>
            <name>Galton distribution</name>
            <type>continuous</type>
            <model>The log-normal distribution models certain skewed variables.</model>
            <parameter>\(\mu \in (-\infty, \infty)\), the normal mean</parameter>
            <parameter>\(\sigma \in (0, \infty)\), the normal standard deviation</parameter>
            <parameter>\( e^\mu \), the scale parameter</parameter>
            <standard>\( \mu = 0, \; \sigma = 1 \)</standard>
            <support>\((0, \infty)\)</support>
            <pdf>\(f(x) = \frac{1}{\sigma x \sqrt{2 \pi}}\, \exp\left(-\frac{\left[\ln(x) - \mu\right]^2}{2 \sigma^2}\right), \; x \in (0, \infty) \)</pdf>
            <mode>\(e^{\mu - \sigma^2}\)</mode>
            <cdf>\( F(x) = \Phi\left[\frac{\ln(x) - \mu}{\sigma}\right], \; x \in (0, \infty)\) where \( \Phi \) is the standard normal distribution function</cdf>
            <qf>\(F^{-1}(p) = \exp\left[\mu + \sigma \Phi^{-1}(p)\right]\), where \(\Phi\) is the standard normal distribution function</qf>
            <moments type="raw">\(\mu(n) = \exp(\mu n + \frac{1}{2} \sigma^2 n^2), \; n \in \{0, 1, \ldots\}\)</moments>
            <mean>\(e^{\mu + \sigma^2/2}\)</mean>
            <variance>\((e^{\sigma^2} - 1) e^{2 \mu + \sigma^2}\)</variance>
            <skew>\((e^{\sigma^2} + 2) \sqrt{e^{\sigma^2} - 1}\)</skew>
            <kurt>\(e^{4 \sigma^2} + 2 e^{3 \sigma^2} + 3e^{2 \sigma^2} - 6\)</kurt>
            <entropy>\(\frac12 + \frac12 \ln(2 \pi \sigma^2) + \mu\)</entropy>
            <median>\(e^{\mu}\,\)</median>
            <q1>\(\exp\left[\mu + \sigma \Phi^{-1}\left(\frac{1}{4}\right)\right]\), where \(\Phi\) is the standard normal distribution function</q1>
            <q3>\(\exp\left[\mu + \sigma \Phi^{-1}\left(\frac{3}{4}\right)\right]\), where \(\Phi\) is the standard normal distribution function</q3>
            <family>scale </family>
            <family>exponential</family>
            <history>The lognormal distribution was first studied by Donald McAlister in 1879, in response to a problem posed by Francis Galton. This historical origin is the reason for the alternative name Galton distribution. The term lognormal distribution was first used by J.H. Gaddum in 1945.</history>
            <cite>famoye1995continuous</cite>
        </distribution>

        <distribution id="log-logistic">
            <name>log-logistic distribution </name>
            <name>Fisk distribution</name>
            <type>continuous</type>
            <model>The log-logistic distribution models lifetimes of devices whose failure rates at first increase and then decrease.</model>
            <parameter>\(k \in (0, \infty)\), the shape parameter</parameter>
            <parameter>\(b \in (0, \infty)\), the scale parameter</parameter>
            <support>\([0, \infty)\)</support>
            <standard>\( k = 1, \; b = 1 \)</standard>
            <pdf>\( f(x) = \frac{b^k k x^{k - 1}}{(b^k + x^k)^2}, \; x \in [0, \infty)\)</pdf>
            <mode>\( b \left(\frac{k - 1}{k + 1}\right)^{1/k} \) if \( k \gt 1 \)</mode>
            <cdf>\(F(x) = \frac{x^k}{b^k + x^k}, \; x \in [0, \infty)\)</cdf>
            <qf>\(F^{-1}(p) = b \left(\frac{p}{1 - p}\right)^{1/k}, \; p \in [0, 1)\)</qf>
            <moments type="raw">\(\mu(n) = b^n \frac{\pi n / k}{\sin(\pi n / k)}, \; n \lt k\)</moments>
            <mean>\( \infty \) if \( 0 \lt k \le 1 \); \(b \frac{\pi / k}{\sin(\pi / k)}\) if \( k \gt 1 \)</mean>
            <variance>does not exist if \( 0 \lt k \le 1 \); \( \infty \) if \( 1 \lt k \le 2 \); \( b^2 \left[\frac{2 \pi / k}{\sin(2 \pi / k)} - \frac{\pi^2 / k^2}{\sin^2(\pi / k)}\right] \) if \( k \gt 2 \)</variance>
            <median>\( b\)</median>
            <q1>\( b (1/3)^{1/k} \)</q1>
            <q3>\(b 3^{1/k} \)</q3>
            <family>scale</family>
            <history>The log-logistic distribution is known as the Fisk distribution by economists. P.R. Fisk used the distribution to model income in 1961.</history>
            <cite>shoukri1988sampling</cite>
        </distribution>

        <distribution id="Maxwell-Boltzman">
            <name>Maxwell-Boltzmann distribution</name>
            <name>Maxwell distribution</name>
            <type>continuous</type>
            <model>The Maxwell-Boltzmann Distribution arises in the kinetic theory of gases.</model>
            <parameter>\(b \in (0, \infty)\), the scale parameter</parameter>
            <standard>\( b = 1 \)</standard>
            <support>\([0, \infty)\)</support>
            <pdf>\(f(x) = \frac{1}{b^3} \sqrt{\frac{2}{\pi}} x^2 \exp\left(-\frac{x^2}{2 b^2}\right), \; x \in [0, \infty)\)</pdf>
            <mode>\(\sqrt{2} b\)</mode>
            <cdf>\(F(x) = 2 \Phi\left(\frac{x}{b}\right) - \frac{1}{b} \sqrt{\frac{2}{\pi}} x \exp\left(-\frac{x^2}{2 b^2}\right) - 1\) where \( \Phi \) is the standard normal distribution function</cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <mean>\(2 b \sqrt{\frac{2}{\pi}}\)</mean>
            <variance>\(\frac{b^2 (3 \pi - 8)}{\pi}\)</variance>
            <skew>\(\frac{2 \sqrt{2}(16 - 5 \pi)}{(3 \pi - 8)^{3/2}}\)</skew>
            <kurt>\(4 \frac{(-96 + 40 \pi - 3 \pi^2)}{(3 \pi - 8)^2}\)</kurt>
            <q1>\(Q(\frac{1}{4})\) where \(Q\) is the quantile function</q1>
            <q3>\(Q(\frac{3}{4})\) where \(Q\) is the quantile function</q3>
            <family>scale</family>
            <history>The Maxwell-Boltzmann distribution is named for James Clerk Maxwell and Ludwig Boltzmann for their use of the distribution in modeling the energy of molecules in a gas.</history>
            <cite>laurendeau2005statistical</cite>
        </distribution>

        <distribution id="negative-binomial">
            <name>negative binomial distribution </name>
            <name>Pascal distribution</name>
            <type>discrete</type>
            <model>The negative binomial distribution governs the number of trials needed for a specified number of successes in the Bernoulli trials model.</model>
            <parameter>\(k \in \{1, 2, \ldots\}\), the number of successes</parameter>
            <parameter>\(p \in (0, 1]\), the success parameter</parameter>
            <standard>\( k = 1, \; p = \frac{1}{2} \)</standard>
            <support>\(\{k, k+1, \ldots\}\)</support>
            <pdf>\(f(x) = \binom{x - 1}{k - 1} p^k (1 - p)^{x-k}, \; x \in \{k, k+1, \ldots\}\)</pdf>
            <mode>\(\lfloor 1 + \frac{k-1}{p}\rfloor\)</mode>
            <cdf>\(F(x) = \sum_{j=k}^x f(j) , \; x \in \{k, k+1, \ldots\}\) where \(f\) is the probability density function</cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <pgf>\(G(t) = \left[\frac{p t}{1 - (1-p) t}\right]^k, \; t \in (-\frac{1}{1-p}, \frac{1}{1-p})\)</pgf>
            <mgf>\(M(t) = \left[\frac{p e^t}{1 - (1-p) e^t}\right]^k, \; t \in (-\infty, -\ln(1 - p))\)</mgf>
            <cf>\(\varphi(t) = \left[\frac{p e^{i t}}{1 - (1-p) e^{i t}}\right]^k, \; t\in (-\infty, \infty)\)</cf>
            <mean>\(k \frac{1}{p}\)</mean>
            <variance>\(k \frac{1-p}{p^2}\)</variance>
            <skew>\(\frac{2-p}{\sqrt{k (1-p)}}\)</skew>
            <kurt>\(\frac{1}{k} \left[6 + \frac{p^2}{1 - p}\right]\)</kurt>
            <median>\(Q\left(\frac{1}{2}\right)\) where \(Q\) is the quantile function</median>
            <q1>\(Q\left(\frac{1}{4}\right)\) where \(Q\) is the quantile function</q1>
            <q3>\(Q\left(\frac{3}{4}\right)\) where \(Q\) is the quantile function</q3>
            <history>The alternative name Pascal distribution is in honor of Blaise Pascal who used the distribution in his solution to the Problem of Points.</history>
            <cite>el2006negative</cite>
        </distribution>

        <distribution id="normal">
            <name>normal distribution </name>
            <name>Gaussian distribution </name>
            <name>error distribution</name>
            <type>continuous</type>
            <model>The normal distribution is used to model physical quantities that are subject to numerous small, random errors.</model>
            <parameter>\(\mu \in (-\infty, \infty)\), the location parameter</parameter>
            <parameter>\(\sigma \in (0, \infty)\), the scale parameter</parameter>
            <standard>\( \mu = 0, \; \sigma = 1 \)</standard>
            <support>\((-\infty, \infty)\)</support>
            <pdf>\(f(x) = \frac{1}{\sqrt{2 \pi} \sigma} \exp \left[-\frac{1}{2}\left(\frac{x - \mu}{\sigma}\right)^2 \right], \; x \in (-\infty, \infty)\)</pdf>
            <mode>\(\mu\)</mode>
            <cdf>\(F(x) = \Phi\left(\frac{x - \mu}{\sigma}\right), \; x \in (-\infty, \infty)\) where \(\Phi\) is the standard normal distribution function</cdf>
            <qf>\(Q(p) =  \mu + \sigma \Phi^{-1}(p), \; p \in (0, 1)\) where \(\Phi\) is the standard normal distribution function</qf>
            <mgf>\(M(t) = \exp\left(\mu t + \frac{1}{2} \sigma^2 t^2\right), \; t \in (-\infty, \infty)\)</mgf>
            <cf>\(\varphi(t) = \exp\left(i \mu t - \frac{1}{2} \sigma^2 t^2\right), \; t \in (-\infty, \infty)\)</cf>
            <mean>\(\mu\)</mean>
            <variance>\(\sigma^2\)</variance>
            <skew>\(0\)</skew>
            <kurt>\(0\)</kurt>
            <entropy>\(\frac{1}{2} \ln(2 \pi e \sigma^2)\)</entropy>
            <median>\(\mu\)</median>
            <q1>\(\mu + \Phi^{-1}\left(\frac{1}{4}\right) \sigma\) where \(\Phi^{-1}\) is the standard normal quantile function</q1>
            <q3>\(\mu + \Phi^{-1}\left(\frac{3}{4}\right) \sigma\) where \(\Phi^{-1}\) is the standard normal quantile function</q3>
            <family>location </family>
            <family>scale </family>
            <family>exponential </family>
            <family>stable</family>
            <cite>dinov2008central</cite>
            <history>The normal distribution was first derived by Carl Friedrich Gauss in 1809 (hence the alternative name Gaussian distribution). The normalizing constant and the first version of the Central Limit Theorem were contributions by Pierre Simon Laplace. The term normal distribution was popularized by Karl Pearson around the turn of the 20th century.</history>
        </distribution>

        <distribution id="students-t">
            <name>students t distribution </name>
            <name>Student's t distribution </name>
            <name>t distribution</name>
            <type>continuous</type>
            <model>The t distribution arises when estimating the mean of a normally distributed population when the sample size is small and population standard deviation is unknown. It comes into play in various statistical analyses like Students t-test for assessing the between-group statistical significant differences of two sample means,  construction of confidence intervals for difference between two population means, linear regression analyses, etc. Like the normal distribution, the t-distribution is symmetric, bell-shaped and unimodal. However it has heavier tails, meaning that it is more prone to producing values that fall far from its mean. The Students t-distribution is a special case of the generalized hyperbolic distribution</model>
            <parameter>\(n \in (0, \infty)\), degrees of freedom</parameter>
            <standard>\( n = 1 \)</standard>
            <support>\((-\infty, \infty)\)</support>
            <pdf>\(f(x) = \frac{\Gamma \left(\frac{n + 1}{2} \right)} {\sqrt{n \pi} \Gamma \left(\frac{n}{2} \right)} \left(1 + \frac{x^2}{n} \right)^{-\frac{n + 1}{2}}, \; x \in (-\infty, \infty)\) where \( \Gamma \) is the gamma function</pdf>
            <mode>\(0\)</mode>
            <cdf>\(F(x) = \frac{1}{2} + x \Gamma \left( \frac{n + 1}{2} \right) \frac{\,_2F_1 \left ( \frac{1}{2},\frac{n + 1}{2};\frac{3}{2}; - \frac{x^2}{n} \right)} {\sqrt{\pi n}\,\Gamma \left(\frac{n}{2}\right)}, \; x \in (-\infty, \infty)\), where \({ }_2F_1\) is the hypergeometric function</cdf>
            <mean>0 for \( n \gt 1 \)</mean>
            <variance>\(\frac{n}{n - 2}, \) for \( n \gt 2 \)</variance>
            <skew>0 for \( n \gt 3 \)</skew>
            <kurt>\(\frac{6}{n - 4}, \) for \( n \gt 4\)</kurt>
            <entropy>\(\frac{n + 1}{2}\left[\psi \left(\frac{1 + n}{2} \right) - \psi \left(\frac{n}{2} \right) \right] + \log{\left[\sqrt{n} B \left(\frac{n}{2}, \frac{1}{2} \right)\right]} \)</entropy>
            <median>0</median>
            <family>exponential </family>
            <cite>li1957student</cite>
        </distribution>

        <distribution id="truncated-normal">
            <name>truncated normal distribution </name>
            <type>continuous</type>
            <model>The truncated normal distribution is the probability distribution of a normally
                distributed random variable whose value is either bounded below, above or on both sides.
                The truncated normal distribution has wide applications in statistics and econometrics</model>
            <parameter>\(\mu \in (-\infty, \infty)\), the location parameter</parameter>
            <parameter>\(\sigma \in (0, \infty)\), the scale parameter</parameter>
            <parameter>\(a \in (-\infty, \infty)\), left limit</parameter>
            <parameter>\(b \in (a, \infty)\), right limit</parameter>
            <support>\([a, b]\)</support>
            <pdf>\(f(x;\mu,\sigma,a,b)=\frac{1}{\sigma Z}\phi(\xi)\)</pdf>
            <mode><![CDATA[
        \(\left\{\begin{array}{ll}a,&\mathrm{if}\ \mu<a\\\mu,&\mathrm{if}\ a\le\mu\le b\\b,&\mathrm{if}\ \mu>b\end{array}\right.\)
        ]]></mode>
            <cdf>\(F(x;\mu,\sigma,a,b)=\frac{\Phi(\xi)-\Phi(\alpha)}{Z}\)</cdf>
            <mean>\(\mu+\frac{\phi(\alpha)-\phi(\beta)}{Z}\sigma\)</mean>
            <variance>\(\sigma^2\left[1+\frac{\alpha\phi(\alpha)-\beta\phi(\beta)}{Z}-\left(\frac{\phi(\alpha)-\phi(\beta)}{Z}\right)^2\right]\)</variance>
            <cite>kotz2000continuous</cite>
        </distribution>

        <distribution id="Pareto">
            <name>Pareto distribution </name>
            <name>Bradford distribution</name>
            <type>continuous</type>
            <model>The Pareto distribution models highly skewed variables that sometimes arise in economics.</model>
            <parameter>\(a \in (0, \infty)\), the shape parameter</parameter>
            <parameter>\(b \in (0, \infty)\), the scale parameter</parameter>
            <standard>\( a = 1, \; b = 1 \)</standard>
            <support>\([b, \infty)\)</support>
            <pdf>\(f(x) = \frac{a b^a}{x^{a+1}}, \; x \in [b, \infty)\)</pdf>
            <mode>\(b\)</mode>
            <cdf>\(F(x) = 1 - \left(\frac{b}{x}\right)^a, \; x \in [b, \infty)\)</cdf>
            <qf>\(F^{-1}(p) = \frac{b}{(1 - p)^{1/a}}, \; p \in [0, 1)\)</qf>
            <cf>\(\varphi(t) = a (-i b t)^a \Gamma(-a, -i b t)\) where \(\Gamma(\cdot,\cdot)\) is the upper incomplete gamma function</cf>
            <moments type="raw">\(\mu(n) = b^n \frac{a}{a - n}, \; n \in (0, a)\)</moments>
            <mean>\( b \frac{a}{a - 1} \) for \( a \gt 1 \)</mean>
            <variance>\( b^2 \frac{a}{(a - 1)^2 (a - 2)} \) for \( a \gt 2 \)</variance>
            <skew>\(\frac{2(1 + a)}{a - 3} \sqrt{\frac{a - 2}{a}}\) for \(a \gt 3\)</skew>
            <kurt>\(\frac{6(a^3 + a^2 - 6 a - 2)}{a(a - 3)(a - 4)}\) for \(a \gt 4\)</kurt>
            <entropy>\(\ln\left(\frac{b}{a}\right) + \frac{1}{a} + 1\)</entropy>
            <median>\(b 2^{1/a}\)</median>
            <q1>\(b \left(\frac{4}{3}\right)^{1/a}\)</q1>
            <q3>\(b 4 ^{1/a}\)</q3>
            <family>scale </family>
            <history>The Pareto distribution is named for the Italian economist Vilfredo Pareto, who used the distribution to model wealth, income and other economic variables. </history>
            <cite>arnold1985pareto</cite>
        </distribution>

        <distribution id="Poisson">
            <name>Poisson distribution</name>
            <type>discrete</type>
            <model>The Poisson distribution models the number of random points in a region of time or space under certain ideal conditions.</model>
            <parameter>\(\lambda \in (0, \infty)\), the shape parameter</parameter>
            <standard>\( \lambda = 1 \)</standard>
            <support>\(\{0, 1, 2, \ldots\}\)</support>
            <pdf>\(f(k) = e^{-\lambda} \frac{\lambda^k}{k!}, \; k \in \{0, 1, \ldots\} \)</pdf>
            <mode>\(\lfloor\lambda\rfloor\) if \( \lambda \) is not an integer; \( \lambda \) and \( \lambda - 1 \) if \( \lambda \) is a positive integer</mode>
            <cdf>\(F(x) = \frac{1}{x!} \gamma(x + 1, \lambda), \; x \in \{0, 1, 2, \ldots\}\) where \(\gamma\) is the lower incomplete gamma function</cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <pgf>\(G(t) = e^{\lambda (t - 1)}, \; t \in (-\infty, \infty)\)</pgf>
            <mgf>\(M(t) = \exp\left(\lambda(e^t - 1)\right), \; t \in (-\infty, \infty)\)</mgf>
            <cf>\(\varphi(t) = \exp\left(\lambda(e^{i t} - 1)\right), \; t \in (-\infty, \infty)\)</cf>
            <moments type="factorial">\(m(k) = \lambda^k, \; k \in \{0, 1, 2, \ldots\}\)</moments>
            <mean>\(\lambda\)</mean>
            <variance>\(\lambda\)</variance>
            <skew>\(\sqrt{\lambda}\)</skew>
            <kurt>\(\frac{1}{\lambda}\)</kurt>
            <entropy>\(\lambda [1 - \log(\lambda)] + e^{-\lambda} \sum_{k=0}^\infty \frac{\lambda^k \log(k!)}{k!}\)</entropy>
            <median>\(Q\left(\frac12\right) \approx \lfloor\lambda + 1/3 - 0.02/\lambda\rfloor\)</median>
            <q1>\(Q\left(\frac14\right)\) where \(Q\) is the quantile function</q1>
            <q3>\(Q\left(\frac34\right)\) where \(Q\) is the quantile function</q3>
            <family>exponential</family>
            <family>power series</family>
            <history>The Poisson distribution is named for Simeon Poisson who first used the distribution in 1838 in a study of judgements in court cases.</history>
            <cite>consul1973generalization</cite>
        </distribution>

        <distribution id="Rademacher">
            <name>Rademacher distribution</name>
            <type>discrete</type>
            <model>The Rademacher distribution arises in physics and in bootstrapping.</model>
            <support>\(k\in\{-1,1\}\,\)</support>
            <pdf><![CDATA[
            \(f(k)=\begin{cases}1/2,&k=-1\\1/2,&k=1\end{cases}\)
        ]]></pdf>
            <mode>N/A</mode>
            <cdf><![CDATA[
            \(F(k)=\begin{cases}0,&k<-1\\1/2,&-1\leq k<1\\1,&k\geq1\end{cases}\)
        ]]></cdf>
            <qf>\(Q(p) = -1, \; p \in [0, \frac{1}{2}]; \quad Q(p) = 1, \; p \in (\frac{1}{2}, 1]\)</qf>
            <mgf>\(M(t) = \cosh(t), \; t \in (-\infty, \infty)\)</mgf>
            <cf>\(\varphi(t) = \cos(t), \; t \in (-\infty, \infty)\)</cf>
            <moments>\(\mu(n) = 1, \; n \in \{0, 2, \ldots\}; \quad \mu(n) = 0, \; n \in \{1, 3, \ldots\}\)</moments>
            <mean>\(0\,\)</mean>
            <variance>\(1\,\)</variance>
            <skew>\(0\,\)</skew>
            <kurt>\(-2\,\)</kurt>
            <entropy>\(\ln(2)\)</entropy>
            <median>\(0\,\)</median>
            <q1>\(-1\)</q1>
            <q3>\(1\)</q3>
            <history>The Rademacher distribution is named for the German mathematician Hans Rademacher.</history>
            <cite>montgomery1990distribution</cite>
        </distribution>

        <distribution id="Rayleigh">
            <name>Rayleigh distribution</name>
            <type>continuous</type>
            <model>The Rayleigh distribution governs the magnitude of a vector with independent, normal components that have zero mean and the same variance.</model>
            <parameter>\(\sigma \in (0, \infty)\), scale</parameter>
            <standard>\( \sigma = 1 \)</standard>
            <support>\([0, \infty)\)</support>
            <pdf>\(f(x) = \frac{x}{\sigma^2} \exp\left(-\frac{x^2}{2 \sigma^2}\right), \; x \in [0, \infty) \)</pdf>
            <mode>\(\sigma\)</mode>
            <cdf>\(F(x) = 1 - \exp\left(-\frac{x^2}{2 \sigma^2}\right), \; x \in [0, \infty)\)</cdf>
            <qf>\(Q(p) = \sigma \sqrt{-2 \ln(1 - p)}, \; p \in [0, 1)\)</qf>
            <mgf>\(M(t) = 1 + \sqrt{2 \pi} \sigma t \exp\left(\frac{\sigma^2 t^2}{2}\right) \Phi(\sigma t), \; t \in (-\infty, \infty)\) where \(\Phi\) is the standard normal distribution function</mgf>
            <moments type="raw">\(\mu(n) = \sigma^n 2^{n/2} \Gamma\left(1 + \frac{n}{2}\right), \; n \in [0, \infty)\)</moments>
            <mean>\(\sigma \sqrt{\frac{\pi}{2}}\)</mean>
            <variance>\(\sigma^2 (2 - \pi / 2)\)</variance>
            <skew>\(\frac{2 \sqrt{\pi} (\pi - 3)}{(4 - \pi)^{3/2}}\)</skew>
            <kurt>\(-\frac{6 \pi^2 - 24 \pi + 16}{(4 - \pi)^2}\)</kurt>
            <entropy>\(1 + \ln\left(\frac{\sigma}{\sqrt{2}}\right) + \frac{\gamma}{2}\) where \(\gamma\) is Euler's constant</entropy>
            <median>\(\sigma \sqrt{\ln(4)}\,\)</median>
            <q1>\(\sigma \sqrt{\ln(16) - \ln(9)}\)</q1>
            <q3>\(\sigma \sqrt{\ln(16)}\)</q3>
            <family>scale</family>
            <history>The Rayleigh distribution is named for the English mathematician Lord Rayleigh (John William Strutt).</history>
            <cite>kuruoglu2004modeling</cite>
        </distribution>

        <distribution id="Rice">
            <name>Rice distribution </name>
            <name>Rician distribution</name>
            <type>continuous</type>
            <model>The Rice distribution governs the magnitude of a circular bivariate normal random vector.</model>
            <parameter>\(\nu \in [0, \infty)\), the distance parameter</parameter>
            <parameter>\(\sigma \in (0, \infty)\), the scale parameter</parameter>
            <support>\([0, \infty)\)</support>
            <pdf>\(\frac{x}{\sigma^2}\exp\left(\frac{-(x^2+\nu^2)}
                {2\sigma^2}\right)I_0\left(\frac{x\nu}{\sigma^2}\right)\)</pdf>
            <cdf>\(F(x) = 1 - Q\left(\frac{\nu}{\sigma}, \frac{x}{\sigma}\right), \; x \in [0, \infty)\) where \(Q\) is the Marcum \(Q\)-function</cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <mean>\(\sigma  \sqrt{\pi/2}\,\,L_{1/2}(-\nu^2/2\sigma^2)\)</mean>
            <variance>\(2\sigma^2+\nu^2-\frac{\pi\sigma^2}{2}L_{1/2}^2\left(\frac{-\nu^2}{2\sigma^2}\right)\)</variance>
            <q1>\(Q(\frac{1}{4})\) where \(Q\) is the quantile function</q1>
            <q3>\(Q(\frac{3}{4})\) where \(Q\) is the quantile function</q3>
            <family>scale</family>
            <history>The Rice distribution is named for Stephen O. Rice who used the distribution in 1945 in his study of random noise.</history>
            <cite>rice1945mathematical</cite>
        </distribution>

        <distribution id="semicircle">
            <name>semicircle distribution </name>
            <name>Wigner distribution </name>
            <name>Sato-Tate distribution</name>
            <type>continuous</type>
            <model>The semicircle distribution arises as the limiting distribution of the eigenvalues of random symmetric matrices.</model>
            <parameter>\(r \in (0, \infty)\), scale (radius)</parameter>
            <parameter>\( a \in (-\infty, \infty) \), location (center)</parameter>
            <standard> \( a = 0, \; r = 1 \)</standard>
            <support>\( [a - r, a + r] \)</support>
            <pdf>\(f(x) = \frac{2}{\pi r^2} \sqrt{r^2 - (x - a)^2}, \; x \in [a - r, a + r]\)</pdf>
            <mode>\( a \)</mode>
            <cdf>\( F(x) = \frac{1}{2} + \frac{x - a}{\pi r^2} \sqrt{r^2 - (x - a)^2} + \frac{1}{\pi} \arcsin\left(\frac{x - a}{r}\right), \; x \in [a - r, a + r] \)</cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in [0, 1]\) where \(F\) is the distribution function</qf>
            <mgf>\(M(t) = 2 e^{t a} \frac{I_1(r t)}{r t}, \; t \in (-\infty, \infty)\) where \(I_1\) is the modified Bessel function</mgf>
            <cf>\(\varphi(t) = 2 e^{i t a} \frac{J_1(r t)}{r t}, \; t \in (-\infty, \infty)\) where \(J_1\) is the Bessel function</cf>
            <moments type="raw">For \( a = 0 \), \( \mu(2 n) = \left(\frac{r}{2}\right)^{2 n} \frac{1}{n + 1} \binom{2n}{n} \) and \( \mu(2 n + 1) = 0 \) for \( n \in \{0, 1, \ldots\} \)</moments>
            <mean>\( a \)</mean>
            <variance>\(\frac{r^2}{4}\)</variance>
            <skew>\(0\,\)</skew>
            <kurt>\(-1\,\)</kurt>
            <entropy>\(\ln(\pi r) - \frac{1}{2}\)</entropy>
            <median>\( a\,\)</median>
            <q1>\(Q\left(\frac{1}{4}\right)\) where \(Q\) is the quantile function</q1>
            <q3>\(Q\left(\frac{3}{4}\right)\) where \(Q\) is the quantile function</q3>
            <family>location</family>
            <family>scale</family>
            <history>The semicircle distribution was used by the physicist Eugene Wigner in the study of random matrices. The distribution was also used by Mikio Sato and John Tate in a conjecture in number theory.</history>
            <cite>abramowitz1972handbook</cite>
        </distribution>
        
       <distribution id="triangular">
            <name>triangular distribution</name>
            <name>triangle distribution</name>
            <type>continuous</type>
            <model>The triangular distribution arises from various simple combinations of continuous uniform distributions.</model>
            <parameter>\(a \in (-\infty, \infty)\), location, the left endpoint</parameter>
            <parameter>\( w \in (0, \infty) \), scale, the width of the interval</parameter>
            <parameter>\( p \in [0, 1] \), shape</parameter>
            <parameter>\(b = a + w\), the right endpoint</parameter>
            <parameter>\(c = a + p w\), the location of the vertex</parameter>
            <standard>\( a = 0, \; w = 1, \; p = \frac{1}{2} \)</standard>
            <support>\([a, a + w]\)</support>
            <pdf>\(f(x) = \begin{cases} \frac{2(x - a)}{(b - a)(c-a)} &amp; a \le x \leq c, \\ \frac{2(b - x)}{(b - a)(b - c)} &amp; c \lt x \le b  \end{cases} \)</pdf>
            <mode>\( c \)</mode>
            <cdf>\(F(x) = \begin{cases} \frac{(x - a)^2}{(b - a)(c - a)} &amp; a \le x \leq c, \\ 1 - \frac{(b - x)^2}{(b - a)(b - c)} &amp; c \lt x \le b \end{cases}\)</cdf>
            <qf>\(Q(u) = \begin{cases} a + \sqrt{(b - a)(c - a) u}, &amp; \; u \in [0, (c - a) / (b - a)] \\ b - \sqrt{(1 - u)(b - a)(b - c)}, &amp; u \in [(c - a) / (b - a), 1] \end{cases}\)</qf>
            <mgf>\(M(t) = 2 \frac{(b - c) e^{a t} - (b - a) e^{c t} + (c - a) e^{b t}}{(b - a)(c - a)(b - c) t^2}, \; t \in (-\infty, \infty)\)</mgf>
            <mean>\(\frac{a + b + c}{3}\)</mean>
            <variance>\(\frac{a^2 + b^2 + c^2 - a b - a c - b c}{18}\)</variance>
            <skew>\(\frac{\sqrt2(a + b - 2 c)(2 a - b - c)(a - 2 b + c)}{5 (a^2 + b^2 + c^2 - a b - a c - b c)^{3/2}}\)</skew>
            <kurt>\(-\frac{3}{5}\)</kurt>
            <entropy>\(\frac{1}{2} + \ln\left(\frac{b - a}{2}\right)\)</entropy>
            <median>\(\begin{cases} a + \frac{\sqrt{(b - a)(c - a)}}{\sqrt{2}}, &amp; c \ge \frac{a + b}{2}, \\ b - \frac{\sqrt{(b - a)(b - c)}}{\sqrt{2}}, &amp; c \le \frac{a + b}{2} \end{cases}\)</median>
            <q1>\(\begin{cases} a + \sqrt{\frac{1}{4}(b - a)(c - a)}, &amp; c \geq \frac{3}{4} a + \frac{1}{4} b \\ b - \sqrt{\frac{3}{4}(b - a)(b - c)} &amp; c \leq \frac{3}{4} a + \frac{1}{4} b \end{cases}\)</q1>
            <q3>\(\begin{cases} a + \sqrt{\frac{3}{4}(b - a)(c - a)}, &amp; c \geq \frac{1}{4} a + \frac{3}{4} b \\ b - \sqrt{\frac{1}{4}(b - a)(b - c)}, &amp; c \leq \frac{1}{4} a + \frac{3}{4} b \end{cases}\)</q3>
            <family>location</family>
            <family>scale</family>
            <cite>ren2002novel</cite>
        </distribution>

        <distribution id="U-quadratic">
            <name>U-quadratic distribution</name>
            <type>continuous</type>
            <model>The U-quadratic distribution models certain symmetric, bimodal variables.</model>
            <parameter>\(c \in (-\infty, \infty)\), location, center</parameter>
            <parameter>\( w \in (0, \infty) \), scale, radius</parameter>
            <parameter>\(a = c - w\), the left endpoint</parameter>
            <parameter>\(b = c + w \), the right endpoint</parameter>
            <standard>\( c = 0, \; w = 1 \)</standard>
            <support>\([a, b]\)</support>
            <pdf>\(f(x) = \frac{3}{2 w} \left(\frac{x - c}{w}\right)^2, \; x \in [a, b]\)</pdf>
            <mode>\(\{a, b\}\)</mode>
            <cdf>\( F(x) = \frac{1}{2}\left[1 + \left(\frac{x - c}{w}\right)^3\right], \; x \in [a, b] \)</cdf>
            <qf>\(Q(p) = c + w(2 p - 1)^{1/3}, \; p \in [0, 1]\)</qf>
            <mean>\(c = \frac{a + b}{2}\)</mean>
            <variance>\(\frac{3}{5} w^2 = \frac{3}{20}(b - a)^2\)</variance>
            <skew>\(0\)</skew>
            <kurt>\(-\frac{38}{21}\)</kurt>
            <median>\(c = \frac{a + b}{2}\)</median>
            <q1>\( c - \frac{1}{2^{1/3}} w \)</q1>
            <q3>\( c + \frac{1}{2^{1/3}} w \)</q3>
            <family>location</family>
            <family>scale</family>
            <cite>rubinstein1973comparative</cite>
        </distribution>

        <distribution id="von-Mises">
            <name>von Mises distribution </name>
            <name>circular normal distribution</name>
            <name>Tikhonov distribution</name>
            <type>continuous</type>
            <model>The von Mises distribution is used as an approximation to the wrapped normal distribution </model>
            <parameter>\(\mu \in (-\infty, \infty)\), the location parameter</parameter>
            <parameter>\(\beta \in (0, \infty)\), the concentration parameter</parameter>
            <support>any interval of length \( 2 \pi \)</support>
            <pdf>\(\frac{e^{\beta\cos(x-\mu)}}{2\pi I_0(\beta)}\)</pdf>
            <mode>\(\mu\)</mode>
            <cdf></cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <mean>\(\mu\)</mean>
            <variance>\(\textrm{var}(x)=1-I_1(\beta)/I_0(\beta)\) (circular)</variance>
            <skew></skew>
            <entropy>\(-\beta \frac{I_1(\beta)}{I_0(\beta)} + \ln[2 \pi I_0(\beta)]\) where \(I_n\) is the modified Bessel function of order \(n\)</entropy>
            <median>\(\mu\)</median>
            <q1>\(Q(\frac{1}{4})\) where \(Q\) is the quantile function</q1>
            <q3>\(Q(\frac{3}{4})\) where \(Q\) is the quantile function</q3>
            <family>location</family>
            <history>The von Mises distribution is named for Richard von Mises based on his work in diffusion processes.</history>
            <cite>abramowitz1972handbook</cite>
        </distribution>

        <distribution id="Wald">
            <name>Wald distribution </name>
            <name>inverse Gaussian distribution</name>
            <type>continuous</type>
            <model>The Wald distribution governs the time that Brownian Motion with positive drift reaches a fixed positive value.</model>
            <parameter>\(\mu \in (0, \infty)\), the mean</parameter>
            <parameter>\(\lambda \in (0, \infty)\), the shape parameter</parameter>
            <standard>\( \mu = 1, \; \lambda = 1 \)</standard>
            <support>\(x\in(0,\infty)\)</support>
            <pdf>\(\left[\frac{\lambda}{2\pi x^3}\right]^{1/2}\exp{\frac{-\lambda(x-\mu)^2}{2\mu^2x}}\)</pdf>
            <mode>\(\mu\left[\left(1+\frac{9\mu^2}{4\lambda^2}\right)^\frac{1}{2}-\frac{3\mu}{2\lambda}\right]\)</mode>
            <cdf>\(\Phi\left(\sqrt{\frac{\lambda}{x}}\left(\frac{x}{\mu}-1\right)\right)\)\(+\exp\left(\frac{2\lambda}{\mu}\right)\Phi\left(-\sqrt{\frac{\lambda}{x}}\left(\frac{x}{\mu}+1\right)\right)\) where \(\Phi\left(\right)\) is the standard normal distribution function.</cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <mgf>\(M(t) = \exp \left[ \frac{\lambda}{\mu} \left(1 - \sqrt{1 - \frac{2 \mu^2}{\lambda} t} \right)\right], \; t \in (-\infty, \frac{\lambda}{2 \mu^2})\)</mgf>
            <cf>\(\varphi(t) = \exp \left[ \frac{\lambda}{\mu} \left(1 - \sqrt{1 - \frac{2 \mu^2}{\lambda} i t} \right)\right], \; t \in (-\infty, \infty)\)</cf>
            <mean>\(\mu\)</mean>
            <variance>\(\frac{\mu^3}{\lambda}\)</variance>
            <skew>\(3\left(\frac{\mu}{\lambda}\right)^{1/2}\)</skew>
            <kurt>\(\frac{15\mu}{\lambda}\)</kurt>
            <median>\(Q\left(\frac{1}{2}\right)\) where \(Q\) is the quantile function</median>
            <q1>\(Q\left(\frac{1}{4}\right)\) where \(Q\) is the quantile function</q1>
            <q3>\(Q\left(\frac{3}{4}\right)\) where \(Q\) is the quantile function</q3>
            <history>The Wald distribution is named for Abraham Wald.</history>
            <cite>chhikara1988inverse</cite>
        </distribution>

        <distribution id="Weibull">
            <name>Weibull distribution</name>
            <type>continuous</type>
            <model>The Weibull distribution is used to model failure times.</model>
            <parameter>\(k \in (0, \infty)\), the shape parameter</parameter>
            <parameter>\(\lambda \in (0, \infty)\), the scale parameter</parameter>
            <standard>\( k = 1, \; \lambda = 1 \)</standard>
            <support>\( (0, \infty) \)</support>
            <pdf>\(f(x) =\frac{k}{\lambda}\left(\frac{x}{\lambda}\right)^{k-1} \exp\left[-\left(\frac{x}{\lambda}\right)^{k}\right], \; x \in (0, \infty)\)</pdf>
            <mode>0 if \( k = 1 \), \(\lambda\left(\frac{k-1}{k}\right)^{\frac{1}{k}}\) if \( k \gt 1 \)</mode>
            <cdf>\(F(x) = 1 - \exp\left[-\left(\frac{x}{\lambda}\right)^k\right], \; x \in (0, \infty)\)</cdf>
            <qf>\(Q(p) = \lambda \left[- \ln(1 - p)\right]^{1/k}, \; p \in (0, 1)\)</qf>
            <mgf>\(M(t) = \sum_{n=0}^\infty \frac{t^n \lambda^n}{n!} \Gamma\left(1 + \frac{n}{k}\right), \; t \in (-\infty, \infty), \; k \in (1, \infty)\) where \(\Gamma\) is the gamma function</mgf>
            <cf>\(\varphi(t) = \sum_{n=0}^\infty \frac{(i t)^n \lambda^n}{n!} \Gamma\left(1 + \frac{n}{k}\right), \; t \in (-\infty, \infty)\) where \(\Gamma\) is the gamma function.</cf>
            <mean>\(\lambda \Gamma(1 + 1/k)\)</mean>
            <variance>\(\lambda^2 \Gamma(1 + 2/k) - \mu^2\) where \( \mu \) is the mean</variance>
            <skew>\(\frac{\Gamma(1 + 3/k)\lambda^3 - 3\mu \sigma^2 - \mu^3}{\sigma^3}\) where \( \mu \) is the mean and \( \sigma \) is the standard deviation</skew>
            <kurt></kurt>
            <median>\(\lambda[\ln(2)]^{1/k}\)</median>
            <q1>\(\lambda [\ln(4) - \ln(3)]^{1/k}\)</q1>
            <q3>\(\lambda [\ln(4)]^{1/k}\)</q3>
            <family>exponential</family>
            <family>scale</family>
            <history>The Weibull distribution is named for Waloddi Weibull who published a paper on the distribution in 1951. The distribution was used earlier by Maurice Frechet. The term Weibull distribution was first used in 1955 in a paper by Julius Lieblein.</history>
            <cite>weibull1951statistical</cite>
        </distribution>

        <distribution id="zeta">
            <name>zeta distribution</name>
            <name>Zipf distribution</name>
            <type>discrete</type>
            <model>The zeta distribution models ranks and sizes of certain randomly chosen items.</model>
            <parameter>\(s \in (1, \infty)\), shape</parameter>
            <standard>\( s = 2 \)</standard>
            <support>\(\{1, 2, \ldots\}\)</support>
            <pdf>\(f(k) = \frac{1}{\zeta(s) k^s}, \; k \in \{1, 2, \ldots\}\) where \( \zeta \) is the zeta function</pdf>
            <mode>\(1\)</mode>
            <cdf>\(F(k) = \frac{H_{k,s}}{\zeta(s)}, \; k \in \{1, 2, \ldots\}\)</cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in [0, 1)\) where \(F\) is the distribution function</qf>
            <mean>\(\frac{\zeta(s - 1)}{\zeta(s)}\) for \(s \gt 2\)</mean>
            <variance>\(\frac{\zeta(s)\zeta(s - 2) - \zeta(s - 1)^2}{\zeta(s)^2}\) for \(s \gt 3\)</variance>
            <median>\( Q\left(\frac{1}{2}\right) \) where \( Q \) is the quantile function</median>
            <q1>\(Q\left(\frac{1}{4}\right)\) where \(Q\) is the quantile function</q1>
            <q3>\(Q\left(\frac{3}{4}\right)\) where \(Q\) is the quantile function</q3>
            <history>The Zipf distribution is named for the American linguist George Kingsley Zipf, who studied the distribution in the context of the frequency of words.</history>
            <cite>johnson2005univariate</cite>
        </distribution>

        <distribution id="location-scale">
            <name>location-scale distribution</name>
            <type>continuous</type>
            <model>Location scale distributions correspond to linear transformations (with positive slope) of a basic random variable, and often correspond to a change of units in a physical problem.</model>
            <parameter>the standard distribution, a continuous distribution with support on an interval \(S_0\)</parameter>
            <parameter>\(\mu \in (-\infty, \infty)\), the location parameter</parameter>
            <parameter>\(\sigma \in (0, \infty)\), the scale parameter</parameter>
            <support>\(S = \{\mu + \sigma x: x \in S_0\}\)</support>
            <pdf>\(f(x) = \frac{1}{\sigma} f_0\left(\frac{x - \mu}{\sigma}\right), \; x \in S\) where \(f_0\) is the probability density function of the standard distribution</pdf>
            <mode>\(\mu + \sigma x_0\) where \(x_0\) is a mode of the standard distribution</mode>
            <cdf>\(F(x) = F_0\left(\frac{x - \mu}{\sigma}\right), \; x \in S\) where \(F_0\) is the distribution function of the standard distribution</cdf>
            <qf>\(Q(p) = \mu + \sigma Q_0(p), \; p \in (0, 1)\) where \(Q_0\) is the quantile function of the standard distribution</qf>
            <mgf>\(M(t) = e^{\mu t} M_0(\sigma t)\) where \(M_0\) is the moment generating function of the standard distribution</mgf>
            <cf>\(\varphi(t) = e^{i \mu t} \varphi_0(\sigma t)\) where \(\varphi_0\) is the characteristic function of the standard distribution</cf>
            <moments type="raw">\(m(n) = \sum_{i=0}^n {n \choose i} \sigma^i \mu^{n-i} m_0(i), \; n \in \{1, 2, \ldots\}\) where \(m_0(i)\) is the \(i\)th raw moment of the standard distribution</moments>
            <mean>\(\mu + \sigma \mu_0\) where \(\mu_0\) is the mean of the standard distribution</mean>
            <variance>\(\sigma^2 \sigma_0^2\) where \(\sigma_0^2\) is the variance of the standard distribution</variance>
            <skew>\(\gamma_{0,1}\) where \(\gamma_{0,1}\) is the skewness of the standard distribution</skew>
            <kurt>\(\gamma_{0,2}\) where \(\gamma_{0,2}\) is the kurtosis of the standard distribution</kurt>
            <entropy>\(\ln(\sigma) + I_0\) where \(I_0\) is the entropy of the standard distribution</entropy>
            <median>\(\mu + \sigma q_{0,2}\) where \(q_{0,2}\) is the median of the standard distribution</median>
            <q1>\(\mu + \sigma q_{0,1}\) where \(q_{0,1}\) is the first quartile of the standard distribution</q1>
            <q3>\(\mu + \sigma q_{0,3}\) where \(q_{0,3}\) is the third quartile of the standard distribution</q3>
            <cite>meyer1987two</cite>
        </distribution>

        <distribution id="folded-normal">
            <name>folded normal distribution</name>
            <type>continuous</type>
            <model>The folded normal distribution governs \(\left|X\right|\) when \(X\) has a normal distribution</model>
            <parameter>\(\mu \in (-\infty, \infty)\), the normal mean</parameter>
            <parameter>\(\sigma \in (0, \infty)\), the normal standard deviation</parameter>
            <standard>\( \mu = 0, \; \sigma = 1 \)</standard>
            <support>\([0, \infty)\)</support>
            <pdf>\(f(x) = \frac{1}{\sigma \sqrt{2\pi}} \, \exp \left( -\frac{(-x - \mu)^2}{2\sigma^2} \right) + \frac{1}{\sigma \sqrt{2 \pi}} \, \exp \left(-\frac{(x - \mu)^2}{2 \sigma^2} \right), \; x \in [0, \infty) \) </pdf>
            <cdf>\( F(x) = \Phi\left(\frac{x - \mu}{\sigma}\right) + \Phi\left(\frac{x + \mu}{\sigma}\right) - 1, \; x \in [0, \infty) \) where \( \Phi \) is the standard normal distribution function</cdf>
            <qf>\(F^{-1}(p), p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <mean>\(\sigma \sqrt{\frac{2}{\pi}} \exp\left(-\frac{\mu^2}{2 \sigma^2} \right) + \mu \left[1 - 2 \Phi\left(-\frac{\mu}{\sigma}\right) \right]\) where \(\Phi\) is the standard normal distribution function</mean>
            <variance>\( \mu^2 + \sigma^2 - \left\{ \sigma \sqrt{2/\pi} \exp(-\mu^2 / 2 \sigma^2) + \mu\left[1 - 2 \Phi(-\mu / \sigma)\right] \right\}^2 \) where \( \Phi \) is the standard normal distribution function</variance>
            <median>\(Q(\frac{1}{2})\) where \(Q\) is the quantile function</median>
            <q1>\(Q(\frac{1}{4})\) where \(Q\) is the quantile function</q1>
            <q3>\(Q(\frac{3}{4})\) where \(Q\) is the quantile function</q3>
            <cite>leone1961folded</cite>
        </distribution>

        <distribution id="half-normal">
            <name>half normal distribution</name>
            <type>continuous</type>
            <model>The half normal distribution governs \(|X|\) when \(X\) has a normal distribution with mean 0.</model>
            <parameter>\(\sigma \in (0, \infty) \), the scale parameter</parameter>
            <standard>\( \sigma = 1 \)</standard>
            <support>\([0, \infty)\)</support>
            <pdf>\(f(x) = \frac{\sqrt{2}}{\sigma \sqrt{\pi}} \exp \left(-\frac{x^2}{2 \sigma^2} \right), \; x \in (0, \infty)\)</pdf>
            <mode>0</mode>
            <mean>\(\sigma \sqrt{\frac{2}{\pi}}\)</mean>
            <variance>\( \sigma^2 \left(1 - \frac{2}{\pi}\right) \)</variance>
            <entropy>\( \frac{1}{2} \ln \left( \frac{ \pi \sigma^2 }{2} \right) + \frac{1}{2}\)</entropy>
            <moments type="raw">\(\mu(n) = \frac{\sigma^n 2^{n/2}}{\sqrt{\pi}} \Gamma\left(\frac{1}{2}(n + 1) \right)\) where \(\Gamma\) is the gamma function</moments>
            <skew>\(\frac{\sqrt{2}(4 - \pi)}{(\pi - 2)^{3/2}}\)</skew>
            <kurt>\(\frac{8(\pi - 3)}{(\pi - 2)^2}\)</kurt>
            <family>scale</family>
            <cite>pescim2010beta</cite>
        </distribution>

        <distribution id="birthday">
            <name>birthday distribution</name>
            <name>occupancy distribution</name>
            <type>discrete</type>
            <model>This distribution models the number of empty cells when \(n\) balls are distributed at random into \(m\) cells</model>
            <parameter>\(m \in \{1, 2, \ldots\}\), the number of cells</parameter>
            <parameter>\(n \in \{1, 2, \ldots\}\), the number of balls</parameter>
            <support>\(\{\max\{m-n, 0\}, \ldots, m - 1\}\)</support>
            <pdf>\(f(x) = \binom{m}{x} \sum_{j=0}^{m-x} (-1)^j \binom{m - x}{j} \left(1 - \frac{x + j}{m}\right)^n, \quad x \in \{\max\{m-n,0\}, \ldots, m-1\}\)</pdf>
            <cdf>\(F(x) = \sum_{j = 0}^x f(j), \quad x \in \{\max\{m-n,0\}, \ldots, m-1\}\) where \(f\) is the probability density function </cdf>
            <moments type="factorial">\(\mu_{(k)} = \frac{m!}{(m - k)!} \left(\frac{m - k}{m} \right)^n, \quad k \in \{1, 2, \ldots\}\)</moments>
            <gf type="probability">\(G(t) = \sum_{k=0}^m \binom{m}{k} \left(\frac{m - k}{m}\right)^n (t - 1)^k, \quad t \in R\)</gf>
            <mean>\(m \left(1 - \frac{1}{m}\right)^n\)</mean>
            <variance>\(m (m - 1) \left(1 - \frac{2}{m}\right)^n + m \left(1 - \frac{1}{m}\right)^n - m^2 \left(1 - \frac{1}{m}\right)^{2n}\)</variance>
            <skew>\(\frac{\mu_3 - 3 \mu_1 \mu_2 + 2 \mu_1^2}{\sigma^3}\) where \(\mu_i\) is the \(i\)th raw moment and \(\sigma\) is the standard deviation</skew>
            <kurt type="excess">\(\frac{\mu_4 - 4 \mu_1 \mu_3 + 6 \mu_1^2 -3 \mu_1^4}{\sigma^4} - 3\) where \(\mu_i\) is the \(i\)th raw moment and \(\sigma\) is the standard deviation</kurt>
            <qf>\(Q(p) = F^{-1}(p), \quad p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <median>\(Q(\frac{1}{2})\) where \(Q\) is the quantile function</median>
            <q1>\(Q(\frac{1}{4})\) where \(Q\) is the quantile function</q1>
            <q3>\(Q(\frac{3}{4})\) where \(Q\) is the quantile function</q3>
            <entropy>\(H = -\sum_{x=0}^n \log[f(x)] f(x)\) where \(f\) is the probability density function</entropy>
            <cite>munford1977note</cite>
        </distribution>

        <distribution id="matching">
            <name>matching distribution</name>
            <type>discrete</type>
            <model>The matching distribution governs the number of matches in a random permutation of \(\{1, 2, \ldots, n\}\)</model>
            <parameter>\(n \in \{2, 3, \ldots\}\), the number of objects permuted</parameter>
            <support>\(\{0, 1, \ldots, n\}\)</support>
            <pdf>\(f(x) = \frac{1}{x!} \sum_{j=0}^{n - x} \frac{(-1)^j}{j!}, \; x \in \{0, 1, \ldots, n\}\)</pdf>
            <mode>\(0\) if \(n\) is even; \(1\) if \(n\) is odd</mode>
            <cdf>\(F(x) = \sum_{j = 0}^x f(j), \; x \in \{0, 1, \ldots, n\}\) where \(f\) is the probability density function </cdf>
            <moments type="factorial">\(\mu_{(k)} = 1, \; k \in \{1, 2, \ldots, n\}; \quad \mu_{(k)} = 0, \; k \in \{n + 1, n + 2, \ldots\}\)</moments>
            <gf type="factorial-moment">\(G(t) = \sum_{k=1}^n \frac{(t-1)^k}{k!}, \; t \in R\)</gf>
            <mean>\(1\)</mean>
            <variance>\(1\)</variance>
            <skew>\(\frac{\mu_3 - 3 \mu_1 \mu_2 + 2 \mu_1^2}{\sigma^3}\) where \(\mu_i\) is the \(i\)th raw moment and \(\sigma\) is the standard deviation</skew>
            <kurt type="excess">\(\frac{\mu_4 - 4 \mu_1 \mu_3 + 6 \mu_1^2 -3 \mu_1^4}{\sigma^4} - 3\) where \(\mu_i\) is the \(i\)th raw moment and \(\sigma\) is the standard deviation</kurt>
            <qf>\(Q(p) = F^{-1}(p), \quad p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <median>\(Q\left(\frac{1}{2}\right)\) where \(Q\) is the quantile function</median>
            <q1>\(Q\left(\frac{1}{4}\right)\) where \(Q\) is the quantile function</q1>
            <q3>\(Q\left(\frac{3}{4}\right)\) where \(Q\) is the quantile function</q3>
            <entropy>\(H = -\sum_{x=0}^n \log[f(x)] f(x)\) where \(f\) is the probability density function</entropy>
            <history>The matching problem was first formulated by Pierre Rémond de Montmort.</history>
            <cite></cite>
        </distribution>

        <distribution id="coupon">
            <name>coupon-collector distribution</name>
            <type>discrete</type>
            <model>This distribution models the number of samples needed to obtain \(k\) distinct values when sampling at random, with replacement, from a population of \(m\) objects</model>
            <parameter>\(m \in \{1, 2, \ldots\}\), the population size</parameter>
            <parameter>\(k \in \{1, 2, \ldots\}\), the number of distinct values to be obtained</parameter>
            <support>\(\{k, k + 1, \ldots\}\)</support>
            <pdf>\(f(x) = \binom{m - 1}{k - 1} \sum_{j=0}^{k-1} (-1)^j \binom{k-1}{j} \left(\frac{k - j - 1}{m}\right)^{x-1}, \quad x \in \{k, k + 1, \ldots\}\)</pdf>
            <cdf>\(F(x) = \sum_{j = k}^x f(j), \quad x \in \{k, k + 1, \ldots\}\) where \(f\) is the probability density function </cdf>
            <gf type="probability">\(G(t) = \prod_{i=1}^k \frac{m - (i - 1)}{m - (i - 1)t}, \quad |t| \lt \frac{m}{k - 1} \)</gf>
            <mean>\(\sum_{i=1}^k \frac{m}{m - i + 1}\)</mean>
            <variance>\(\sum_{i=1}^k \frac{(i-1)m}{(m - i + 1)^2}\)</variance>
            <cite>motwani1995randomized</cite>
        </distribution>
        
        <distribution id="finite-order">
            <name>finite order statistic distribution</name>
            <type>discrete</type>
            <model>This distribution models an order statistic when a sample is chosen at random, without replacement, from a finite, ordered population</model>
            <parameter>\(m \in \{1, 2, \ldots\}\), the population size</parameter>
            <parameter>\(n \in \{1, 2, \ldots, m\}\), the sample size</parameter>
            <parameter>\(k \in \{1, 2, \ldots, n\}\), the order</parameter>
            <support>\(\{k, k + 1, \ldots, m - n + k\}\)</support>
            <pdf>\(f(x) = \frac{\binom{x-1}{k-1} \binom{m-x}{n-k}}{\binom{m}{n}}, \quad x \in \{k, k + 1, \ldots, m - n + k\}\)</pdf>
            <mean>\(k \frac{m+1}{n+1}\)</mean>
            <variance>\(k(n - k + 1) \frac{(m + 1)(m - n)}{(n + 1)^2 (n + 2)}\)</variance>
        </distribution>

        <distribution id="Erlang">
            <name>Erlang distribution</name>
            <type>continuous</type>
            <model>The Erlang probability distribution is related to the exponential and Gamma distributions
                and is used to examine the number of event arrivals, for instance telephone calls which
                might be made at the same time to the operators of the switching stations. This work
                on telephone traffic engineering has been expanded to consider waiting times in queueing
                systems in general</model>
            <parameter>\(k \in \mathbb{N}\), shape parameter </parameter>
            <parameter>\(\lambda > 0\), rate parameter</parameter>
            <parameter>\(\theta = 1/\lambda > 0\), scale parameter</parameter>
            <support>\(\scriptstyle x\;\in\;[0,\,\infty)\!\)</support>
            <pdf>\(\scriptstyle\frac{\lambda^k x^{k-1}e^{-\lambda x}}{(k-1)!\,}\)</pdf>
            <cdf>\(\scriptstyle\frac{\gamma(k,\,\lambda x)}{(k\,-\,1)!}\;=\;1\,-\,\sum_{n=0}^{k-1}\frac{1}{n!}e^{-\lambda x}(\lambda x)^{n}\)</cdf>
            <mean>\(\scriptstyle\frac{k}{\lambda}\,\)</mean>
            <median>No simple closed form</median>
            <variance>\(\scriptstyle\frac{k}{\lambda^2}\,\)</variance>
            <cite>angusintroduction</cite>
        </distribution>

        <distribution id="generalized-gamma">
            <name>Generalized Gamma distribution</name>
            <type>continuous</type>
            <model>The generalized gamma distribution is not often used to model life data by itself, but it is sometimes used to determine which of those life distributions should be used to model a particular set of data.</model>
            <parameter>\(a \in (0, \infty)\), the scale parameter</parameter>
            <parameter>\(d \in (0, \infty)\), the first shape parameter</parameter>
            <parameter>\(p \in (0, \infty)\), the second shape parameter</parameter>
            <support>\((0, \infty)\)</support>
            <pdf>\(f(x; a, d, p) = \frac{(p/a^d) x^{d-1} e^{-(x/a)^p}}{\Gamma(d/p)}\)</pdf>
            <cdf>\(F(x; a, d, p) = \frac{\gamma(d/p, (x/a)^p)}{\Gamma(d/p)}\) where \(\gamma\) denotes incomplete gamma function</cdf>
            <cite>stacy1962generalization</cite>
        </distribution>

        <distribution id="Makeham">
            <name>Makeham distribution</name>
            <name>Gompertz–Makeham law of mortality</name>
            <type>continuous</type>
            <model>The Gompertz–Makeham law states that the death rate is the sum of an age-independent component and an age-dependent component, which increases exponentially with age.</model>
            <parameter>\(\gamma \in (0, \infty)\)</parameter>
            <parameter>\(\delta \in (0, \infty)\)</parameter>
            <parameter>\(\kappa \in (0, \infty)\)</parameter>
            <support>\((0, \infty)\)</support>
            <pdf>\(f(x) = (\gamma + \delta\kappa^x)\exp\left(-\gamma x-\frac{\delta(\kappa^x-1)}{\ln(\kappa)}\right)\)</pdf>
            <cdf>\(F(x) = 1-\exp\left(-\gamma x-\frac{\delta(\kappa^x-1)}{\ln(\kappa)}\right)\)</cdf>
            <cite>gavrilov1983human</cite>
        </distribution>

        <distribution id="hypoexponential">
            <name>hypoexponential distribution</name>
            <name>generalized Erlang distribution</name>
            <type>continuous</type>
            <model>The hypoexponential distribution has a coefficient of variation less than one, compared to the hyperexponential distribution which has a coefficient of variation greater than one and the exponential distribution which has a coefficient of variation of one.</model>
            <parameter>\(\alpha_1,\ldots,\alpha_n \in (0, \infty), \; \alpha_i \neq \alpha_j \text{ for } i \neq j\) </parameter>
            <support>\([0, \infty)\)</support>
            <pdf>\(f(x) = \sum_{i=1}^{n}\frac{1}{\alpha_i}e^{-x/\alpha_i}\left(\prod_{j=1,j\neq i}^{n}\frac{\alpha_i}{\alpha_i-\alpha_j}\right)\)</pdf>
            <cdf>Expressed as a phase-type distribution: \(1-\boldsymbol{\alpha}e^{x\Theta}\boldsymbol{1}\)</cdf>
            <mgf>\(\boldsymbol{\alpha}(tI-\Theta)^{-1}\Theta\mathbf{1}\)</mgf>
            <mean>\(\sum^{k}_{i=1}1/\lambda_{i}\,\)</mean>
            <mode>\((k-1)/\lambda\) if \(\lambda_{k} = \lambda\)</mode>
            <variance>\(\sum^{k}_{i=1}1/\lambda^2_{i}\)</variance>
            <median>\(\ln(2)\sum^{k}_{i=1}1/\lambda_{i}\)</median>
            <skew>\(2(\sum^{k}_{i=1}1/\lambda_{i}^3)/(\sum^{k}_{i=1}1/\lambda_{i}^2)^{3/2}\)</skew>
            <cite>bolch2006queueing</cite>
        </distribution>

        <distribution id="doubly-noncentral-t">
            <!--formula not given for this distribution-->
            <name>doubly noncentral t distribution</name>
            <type>continuous</type>
            <model>The doubly noncentral t distribution is an extended version of the singly noncentral t distribution in that it has two noncentrality parameters instead of just one.</model>
            <history>See http://onlinelibrary.wiley.com/doi/10.1111/j.1467-842X.1969.tb00102.x/pdf </history>
            <cite>krishnan1968series</cite>
        </distribution>

        <distribution id="hyperexponential">
            <name>hyperexponential distribution</name>
            <type>continuous</type>
            <model>The hyperexponential distribution has a coefficient of variation greater than one, compared to the hypoexponential distribution which has a coefficient of variation less than one and the exponential distribution which has a coefficient of variation of one.</model>
            <parameter>\(\alpha_1,\ldots,\alpha_n \in (0, \infty), \; \alpha_i \neq \alpha_j \text{ for } i \neq j\)</parameter>
            <parameter>\(p_i &gt; 0, \sum_{i=1}^{n} p_i = 1\)</parameter>
            <support>\((0, \infty)\)</support>
            <pdf>\(f(x) = \sum_{i=1}^{n}\frac{p_i}{\alpha_i}e^{-x/\alpha_i}\)</pdf>
            <cite>singh2007estimation</cite>
        </distribution>

        <distribution id="Muth">
            <!--can't find a model description-->
            <name>Muth distribution</name>
            <type>continuous</type>
            <model>The Muth distribution is related to reliability models. It has mostly
                a theoretical interest. Muth distribution has two basic properties: (i) the
                mode of this random model is a function involving the golden ratio and (ii)
                the second non-central moment can be expressed in terms of the exponential
                integral function. The moments of higher order cannot be expressed in a simple way.
                The Muth distribution does not have the variate generation property for
                simulation purposes. Its quantile function can be expressed in closed form in
                terms of the negative branch of the Lambert W function. The limit distributions
                of the maxima and minima of the Muth distribution are the Gumbel and Weibull
                distributions, respectively.</model>
            <parameter>\(\kappa \in [0, 1]\), the shape parameter</parameter>
            <support>\((0, \infty)\)</support>
            <pdf>\(f(x) = (e^{\kappa x}-\kappa)e^{-(1/\kappa)e^{\kappa x}+\kappa x+1/\kappa}\)</pdf>
            <cite>muth1960optimal</cite>
        </distribution>

        <distribution id="error">
            <name>generalized error distribution</name>
            <name>generalized normal distribution</name>
            <name>generalized Gaussian distribution</name>
            <name>exponential power distribution</name>
            <type>continuous</type>
            <model>The error distribution is a parametric family of symmetric distributions. It adds a shape parameter to the normal distribution.</model>
            <parameter>\(a \in (-\infty, \infty)\), the mean</parameter>
            <parameter>\(b \in (0, \infty)\), the scale parameter</parameter>
            <parameter>\(c \in (0, \infty)\), the shape parameter</parameter>
            <support>\((-\infty, \infty)\)</support>
            <pdf>\(f(x) = \frac{\exp[-(|x-a|/b)^{2/c}/2]}{b 2^{c/2+1}\Gamma(1+c/2)}\)</pdf>
            <cite>hosking2005regional</cite>
        </distribution>

        <distribution id="minimax">
            <name>minimax distribution</name>
            <type>continuous</type>
            <model>The Minimax distribution is an alternative two-parameter distribution of the Beta distribution.</model>
            <parameter>\(\beta \in (0, \infty)\)</parameter>
            <parameter>\(\gamma \in (0, \infty)\)</parameter>
            <support>\((0, 1)\)</support>
            <pdf>\(f(x) = \beta\gamma x^{\beta-1}(1-x^\beta)^{\gamma-1}\)</pdf>
            <cite>marchand2002minimax</cite>
        </distribution>

        <distribution id="noncentral-F">
            <name>noncentral F distribution</name>
            <type>continuous</type>
            <model>The noncentral F distribution is a generalization of the ordinary F distribution.</model>
            <parameter>\(\delta \in (0, \infty)\)</parameter>
            <support>\((0, \infty)\)</support>
            <pdf>\(f(x) = \sum_{i=0}^{\infty}\frac{\Gamma(\frac{2i+n_1+n_2}{2})(n_1/n_2)^{(2i+n_1)/2}x^{(2i+n_1-2)/2}e^{-\delta/2}(\delta/2)^i}{\Gamma(n_2/2)\Gamma(\frac{2i+n_1}{2})i!(1+\frac{n_1}{n_2}x)^{(2i+n_1+n_2)/2}}\)</pdf>
        </distribution>

        <distribution id="IDB">
            <name>increasing-decreasing-bathtub distribution</name>
            <type>continuous</type>
            <model>The IDB distribution can be used to model either an increasing, decreasing or bathtub shaped failure rate function, which is a combination of a linearly increasing failure rate and a decreasing failure rate function</model>
            <parameter>\(\delta \in (0, \infty)\)</parameter>
            <parameter>\(\kappa \in (0, \infty)\)</parameter>
            <parameter>\(\gamma \in [0, \infty)\)</parameter>
            <support>\((0, \infty)\)</support>
            <pdf>\(f(x) = \frac{(1+\kappa x)\delta x+\gamma}{(1+\kappa x)^{\gamma/\kappa+1}}e^{-\delta x^2/2}\)</pdf>
            <cdf>\(F(x) = 1-\frac{e^{-\delta x^2/2}}{(1+\kappa x)^{\gamma/\kappa}}\)</cdf>
            <mean><![CDATA[\(
            \mbox{E}\left[F\right]=
            \begin{cases}
            \frac{\nu_2(\nu_1+\lambda)}{\nu_1(\nu_2-2)}
            &\nu_2>2\\
            \mbox{Does not exist}
            &\nu_2\le2\\
            \end{cases}\)
        ]]></mean>
            <variance><![CDATA[\(
            \mbox{Var}\left[F\right]=
            \begin{cases}
            2\frac{(\nu_1+\lambda)^2+(\nu_1+2\lambda)(\nu_2-2)}{(\nu_2-2)^2(\nu_2-4)}\left(\frac{\nu_2}{\nu_1}\right)^2
            &\nu_2>4\\
            \mbox{Does not exist}
            &\nu_2\le4.\\
            \end{cases}\)
        ]]></variance>
            <cite>kay1993fundamentals</cite>
        </distribution>

        <distribution id="Benford">
            <name>Benford's law</name>
            <name>first digit law</name>
            <name>significant digit law</name>
            <type>discrete</type>
            <model>Benford's law states that in lists of numbers from many (but not all) real-life sources of data, the leading digit is distributed in a specific, non-uniform way.</model>
            <parameter>\(b \in \{2, 3, \ldots\}\), the base</parameter>
            <standard>\(b = 10\)</standard>
            <support>\(\{1, 2, \ldots, b - 1\}\)</support>
            <pdf>\(f(x) = \log_b(x + 1)- \log_b(x) = \log_b\left(\frac{x + 1}{x}\right), \; x \in \{1, 2, \ldots, b - 1\}\)</pdf>
            <cite>hill1995statistical</cite>
        </distribution>

        <distribution id="doubly-noncentral-F">
            <name>doubly noncentral F distribution</name>
            <type>continuous</type>
            <model>The doubly noncentral F distribution is an extended version of the singly noncentral F distribution in that it has two noncentrality parameters instead of just one.</model>
            <parameter>\(\delta \in (0, \infty)\)</parameter>
            <parameter>\(\gamma \in (0, \infty)\)</parameter>
            <support>\((0, \infty)\)</support>
            <pdf>\(f(x)= \sum_{j=0}^{\infty}\sum_{k=0}^{\infty}[\frac{e^{-\delta/2}(\frac{1}{2}\delta)^j}{j!}][\frac{e^{-\gamma/2}(\frac{1}{2}\gamma)^k}{k!}]\times n_1^{(n_1/2)+j}n_2^{(n_2/2)+k}x^{(n_1/2)+j-1}\times (n_2+n_1 x)^{-\frac{1}{2}(n_1+n_2)-j-k}\times [B(\frac{1}{2}n_1+j,\frac{1}{2}n_2+k)]^{-1}\)</pdf>
            <cite>bulgren1971representations</cite>
        </distribution>

        <distribution id="TSP">
            <name>two-sided power distribution</name>
            <type>continuous</type>
            <model>The two-sided power distribution is an alternative to the triangular distribution, allowing for a nonlinear distribution. Triangular and uniform distributions are special cases of the two-sided power distribution.</model>
            <parameter>\(n \in (0, \infty)\)</parameter>
            <parameter>\(a \in (-\infty, \infty)\)</parameter>
            <parameter>\(b \in (a, \infty)\)</parameter>
            <parameter>\(\theta \in (0, 1)\), with turning point \(m=(b-a)\theta+a\)</parameter>
            <support>\((a, b)\)</support>
            <pdf>\(f(x) = \begin{cases} \frac{n}{b-a}(\frac{x-a}{m-a})^{n-1}, a\lt x\le m \\  \frac{n}{b-a}(\frac{b-x}{b-m})^{n-1}, m\le x\lt b \end{cases}\)</pdf>
            <cite>van2002standard</cite>
        </distribution>

        <distribution id="extreme-value">
            <name>extreme value distribution</name>
            <name>Gumbel distribution</name>
            <type>continuous</type>
            <model>The Extreme Value distribution is the limiting distribution of the minimum of a large number of unbounded identically distributed random variables.</model>
            <parameter>\(a \in (-\infty, \infty)\), location</parameter>
            <parameter>\(b \in (0, \infty)\), scale</parameter>
            <standard>\( a = 0, \; b = 1 \)</standard>
            <support>\((-\infty, \infty)\)</support>
            <pdf>\( f(x) = \frac{1}{b} \exp\left(-\frac{x - a}{b}\right) \exp\left[-\exp\left(-\frac{x - a}{b}\right)\right], \; x \in (-\infty, \infty) \)</pdf>
            <mode>\( a \)</mode>
            <cdf>\( F(x) = \exp\left[-\exp\left(-\frac{x - a}{b}\right)\right], \; x \in (-\infty, \infty) \)</cdf>
            <qf>\( F^{-1}(p) = a - b \ln[-\ln(p)], \; p \in (0, 1) \)</qf>
            <mgf>\( M(t) = e^{a t} \Gamma(1 - b t), \; t \in (-\infty, 1/b) \) where \( \Gamma \) is the gamma function</mgf>
            <mean>\( a + b \gamma \) where \( \gamma \) is Euler's constant</mean>
            <variance>\( b^2 \pi / 6 \)</variance>
            <skew>\( 12 \sqrt{6} \zeta(3) / \pi^3 \)</skew>
            <kurt>\( 12/5 \)</kurt>
            <median>\( a - b \ln[\ln(2)] \)</median>
            <q1>\(a - b \ln[\ln(4) - \ln(3)]\)</q1>
            <q3>\(a - b \ln[\ln(4)]\)</q3>
            <entropy>\( \ln(b) + \gamma + 1 \)</entropy>
            <family>location</family>
            <family>scale</family>
            <history>The Gumbel distribution is named for Emil Gumbel, who derived it in his study of extreme values in 1954.</history>
            <cite>embrechts2011modelling</cite>
        </distribution>

        <distribution id="Lomax">
            <name>Lomax distribution</name>
            <name>Pareto Type II distribution</name>
            <type>continuous</type>
            <model>The Lomax distribution is essentially a Pareto distribution that has been shifted so that its support begins at zero.</model>
            <parameter>\(\kappa \in (0, \infty)\), the shape parameter</parameter>
            <parameter>\(\lambda \in (0, \infty)\), the scale parameter</parameter>
            <support>\(x\ge0\)</support>
            <pdf>\({\kappa\over\lambda}\left[{1+{x\over\lambda}}\right]^{-(\kappa+1)}\)</pdf>
            <cdf>\(1-\left[{1+{x\over\lambda}}\right]^{-\kappa}\)</cdf>
            <mean><![CDATA[
            \(\begin{cases}\mu + \sigma\frac{\Gamma(1-\xi)-1}{\xi} & \text{if}\ \xi\neq 0,\xi<1,\\ \mu + \sigma\,\gamma & \text{if}\ \xi=0,\\ \infty & \text{if}\ \xi\geq 1,\end{cases}\) where \(\gamma\) is Euler's constant
        ]]></mean>
            <median><![CDATA[
            \(\begin{cases}\mu + \sigma \frac{(\ln2)^{-\xi}-1}{\xi} & \text{if}\ \xi\neq0,\\ \mu - \sigma \ln\ln2 & \text{if}\ \xi=0.\end{cases}\)
        ]]></median>
            <mode><![CDATA[
            \(\begin{cases}\mu + \sigma \frac{(1+\xi)^{-\xi}-1}{\xi} & \text{if}\ \xi\neq0,\\ \mu & \text{if}\ \xi=0.\end{cases}\)
        ]]></mode>
            <variance><![CDATA[
            \(\begin{cases}\sigma^2\,(g_2-g_1^2)/\xi^2 & \text{if}\ \xi\neq0,\xi<\frac12,\\ \sigma^2\,\frac{\pi^2}{6} & \text{if}\ \xi=0, \\ \infty & \text{if}\ \xi\geq\frac12,\end{cases}\)
        ]]></variance>
            <skew><![CDATA[
            \(\begin{cases}\frac{g_3-3g_1g_2+2g_1^3}{(g_2-g_1^2)^{3/2}} & \text{if}\ \xi\neq0,\\ \frac{12 \sqrt{6} \zeta(3)}{\pi^3} & \text{if}\ \xi=0.\end{cases}\) where \(\zeta(x)\) is Riemann zeta function
        ]]></skew>
            <kurt><![CDATA[
            \(\begin{cases}\frac{g_4-4g_1g_3+6g_2g_1^2-3g_1^4}{(g_2-g_1^2)^{2}}-3  & \text{if}\ \xi\neq0,\\ \frac{12}{5} & \text{if}\ \xi=0.\end{cases}\)
        ]]></kurt>
            <entropy>\(\log(\sigma)\,+\,\gamma\xi\,+\,(\gamma+1)\)</entropy>
            <cite>coles2001introduction</cite>
        </distribution>

        <distribution id="generalized-Pareto">
            <name>generalized Pareto distribution</name>
            <type>continuous</type>
            <model>The generalized Pareto distribution allows a continuous range of possible shapes that includes both the exponential and Pareto distributions as special cases. </model>
            <parameter>\(\xi \in (-\infty, \infty)\), the shape parameter</parameter>
            <parameter>\(\sigma \in (0, \infty)\), the scale parameter</parameter>
            <parameter>\(\mu \in (-\infty, \infty)\), the location parameter</parameter>
            <support><![CDATA[
            \(x\geqslant\mu\,\;(\xi\geqslant0)\)<br/>\(\mu\leqslant x\leqslant\mu-\sigma/\xi\,\;(\xi<0)\)
        ]]></support>
            <pdf>\(\frac{1}{\sigma}(1+\xi z)^{-(1/\xi+1)}\)<br/>where\(z=\frac{x-\mu}{\sigma}\)</pdf>
            <cdf>\(1-(1+\xi z)^{-1/\xi}\,\)</cdf>
            <mean><![CDATA[
            \(\mu+\frac{\sigma}{1-\xi}\,\;(\xi<1)\)
        ]]></mean>
            <variance><![CDATA[
            \(\frac{\sigma^2}{(1-\xi)^2(1-2\xi)}\,\;(\xi<1/2)\)
        ]]></variance>
            <median>\(\mu+\frac{\sigma(2^{\xi}-1)}{\xi}\)</median>
            <cite>chotikapanich2008modeling</cite>
        </distribution>

        <distribution id="Kolmogorov-Smirnov">
            <name>Kolmogorov-Smirnov test</name>
            <name>Kolmogorov distribution</name>
            <type>continuous</type>
            <model>The Kolmogorov-Smirnov test can be modified to serve as a goodness of fit test.</model>
            <parameter>none</parameter>
            <support>\((0, \infty)\)</support>
            <cdf>\(F(x)=1-2\sum_{k=1}^{\infty}(-1)^{k-1}e^{-2k^2x^2}=1-2[e^{-2x^2}-e^{-8x^2}+e^{-18x^2}-e^{-32x^2}+\cdots]\)</cdf>
            <cite>kolmogorov1933sulla</cite>
        </distribution>

        <distribution id="logistic-exponential">
            <name>logistic-exponential distribution</name>
            <type>continuous</type>
            <type>infinite</type>
            <pdf>\( f(x) = \frac{\alpha\beta e^{\beta x}\left(e^{\beta x}-1\right)^{\alpha-1}}{\left[1+\left(e^{\beta x}-1\right)^{\alpha}\right]^2}, \; x > 0 \)</pdf>
            <cite>lan2008logistic</cite>
        </distribution>

        <distribution id="power-function">
            <name>power-function distribution</name>
            <type>continuous</type>
            <type>nonsymmetric</type>
            <type>finite</type>
            <pdf>\( f(x) = \frac {\alpha(x-a)^{\alpha-1}} {(b-a)^\alpha}  \)</pdf>
            <cite>moothathu1986characterization</cite>
        </distribution>

        <distribution id="student-t-non-central">
            <name>student t non-central distribution</name>
            <type>continuous</type>
            <type>nonsymmetric</type>
            <type>infinite</type>
            <pdf>\( f(t)=\frac{\nu^{\nu/2}e^{-\nu\mu^2/2(t^2+\nu)}} {\sqrt{\pi}\Gamma(\nu/2)2^{(\nu-1)/2}(t^2+\nu)^{(\nu+1)/2}} \times\int\limits_0^\infty x^\nu\exp\left[-\frac{1}{2}\left(x-\frac{\mu t}{\sqrt{t^2+\nu}}\right)^2\right]dx  \)</pdf>
            <cdf><![CDATA[
            \(F_{\nu,\mu}(x)=
            \begin{cases}\tilde{F}_{\nu,\mu}(x), & \mbox{if } x\ge 0; \\
            1-\tilde{F}_{\nu, -\mu}(-x), &\mbox{if } x < 0,
            \end{cases}\)
        ]]></cdf>
            <variance><![CDATA[\(
            \mbox{Var}\left[T\right]=
            \begin{cases}
            \frac{\nu(1+\mu^2)}{\nu-2}
            -\frac{\mu^2\nu}{2}
            \left(\frac{\Gamma((\nu-1)/2)}{\Gamma(\nu/2)}\right)^2 ,
            &\mbox{if }\nu>2 ;\\
            \mbox{Does not exist},
            &\mbox{if }\nu\le2 .\\
            \end{cases}\)
        ]]></variance>
            <mode>\(\sqrt{\frac{\nu}{2}}\frac{\Gamma\left(\frac{\nu+2}{2}\right)}{\Gamma\left(\frac{\nu+3}{2}\right)}\mu;\,\)</mode>
            <cite>lenth1989algorithm</cite>
        </distribution>

        <distribution id="inverted-gamma">
            <name>inverted gamma distribution</name>
            <type>continuous</type>
            <type>nonsymmetric</type>
            <type>finite</type>
            <type>positive</type>
            <pdf>\( \frac{\beta^\alpha}{\Gamma(\alpha)} x^{-\alpha - 1} \exp \left(\frac{-\beta}{x}\right)  \)</pdf>
            <cdf>\(\frac{\Gamma(\alpha,\beta/x)}{\Gamma(\alpha)} \!\)</cdf>
            <mgf>\(\frac{2\left(-\beta t\right)^{\!\!\frac{\alpha}{2}}}{\Gamma(\alpha)}K_{\alpha}\left(\sqrt{-4\beta t}\right)\)</mgf>
            <mean>\(\frac{\beta}{\alpha-1}\!\) for \(\alpha > 1\)</mean>
            <mode>\(\frac{\beta}{\alpha+1}\!\)</mode>
            <variance>\(\frac{\beta^2}{(\alpha-1)^2(\alpha-2)}\!\) for \(\alpha > 2\)</variance>
            <skew>\(\frac{4\sqrt{\alpha-2}}{\alpha-3}\!\) for \(\alpha > 3\)</skew>
            <kurt>\(\frac{30\,\alpha-66}{(\alpha-3)(\alpha-4)}\!\) for \(\alpha > 4\)</kurt>
            <entropy>\(\alpha\!+\!\ln(\beta\Gamma(\alpha))\!-\!(1\!+\!\alpha)\Psi(\alpha)\)</entropy>
            <cite>witkovsky2001computing</cite>
        </distribution>

        <distribution id="Fisher-Tippett">
            <name>Fisher-Tippett distribution</name>
            <type>continuous</type>
            <type>nonsymmetric</type>
            <type>finite</type>
            <type>positive</type>
            <pdf>\( \frac{1}{\beta}\exp\left(-\frac{x-\mu}{\beta}\right)\exp\left[-\exp\left(-\frac{x-\mu}{\beta}\right)\right]  \)</pdf>
            <cite>embrechts2011modelling</cite>
        </distribution>

        <distribution id="Gibrat's">
            <name>Gibrat's distribution</name>
            <type>continuous</type>
            <type>nonsymmetric</type>
            <type>finite</type>
            <type>positive</type>
            <pdf>\( \frac{1}{\sigma\sqrt{2\pi}}\exp\left[-\frac{\left(\ln(x)\right)^2}{2\sigma^2}\right]  \)</pdf>
            <cite>eeckhout2004gibrat</cite>
        </distribution>

        <distribution id="Gompertz">
            <name>Gompertz distribution</name>
            <type>continuous</type>
            <type>nonsymmetric</type>
            <type>finite</type>
            <type>positive</type>
            <pdf>\( b e^{-bx} e^{-\eta e^{-bx}}\left[1 + \eta\left(1 - e^{-bx}\right)\right]  \)</pdf>
            <cdf>\(1-\exp\left(-\eta\left(e^{bx}-1 \right)\right)\)</cdf>
            <mgf>\(\text{E}\left(e^{-t x}\right)=\eta e^{\eta}\text{E}_{t/b}\left(\eta\right)\), \(\text{with E}_{t/b}\left(\eta\right)=\int_1^\infty e^{-\eta v} v^{-t/b}dv,\ t>0\)</mgf>
            <mean>\((-1/b)e^{\eta}\text{Ei}\left(-\eta\right)\), \(\text {where  Ei}\left(z\right)=\int\limits_{-z}^{\infty}\left(e^{-v}/v\right)dv\)</mean>
            <median>\(\left(1/b\right)\ln\left[\left(-1/\eta\right)\ln\left(1/2\right)+1\right]\)</median>
            <cite>bemmaor2012modeling</cite>
        </distribution>

        <distribution id="multinomial">
            <name>multinomial distribution</name>
            <type>discrete</type>
            <type>nonsymmetric</type>
            <type>finite</type>
            <type>positive</type>
            <support>\(X_i \in \{0,\dots,n\}\), where \(\Sigma X_i = n\!\)</support>
            <pdf>\( f(x_1, x_2, \cdots, x_k)={n\choose x_1,x_2,\cdots, x_k}p_1^{x_1}p_2^{x_2}\cdots p_k^{x_k}  \)</pdf>
            <mgf>\(\biggl( \sum_{i=1}^k p_i e^{t_i} \biggr)^n\)</mgf>
            <mean>\(E\{X_i\} = np_i\)</mean>
            <variance>\(\textstyle{\mathrm{Var}}(X_i) = n p_i (1-p_i)\), where \(\textstyle {\mathrm{Cov}}(X_i,X_j) = - n p_i p_j~~(i\neq j)\)</variance>
            <cite>evans2000statistical</cite>
        </distribution>

        <distribution id="negative-multinomial">
            <name>negative multinomial distribution</name>
            <type>discrete</type>
            <type>nonsymmetric</type>
            <type>finite</type>
            <type>positive</type>
            <pdf>\( f(k_o, \cdots, k_r) = \Gamma(k_o + \sum_{i=1}^r{k_i}) \frac{p_o^{k_o}}{\Gamma(k_o)} \prod_{i=1}^r{\frac{p_i^{k_i}}{k_i!}}  \)</pdf>
            <mean>\(\tfrac{k_0}{p_0}\,p\)</mean>
            <variance>\(\tfrac{k_0}{p_0^2}\,pp' + \tfrac{k_0}{p_0}\,\operatorname{diag}(p)\)</variance>
            <cite>gall2006modes</cite>
        </distribution>

        <distribution id="negative-hypergeometric">
            <name>negative hypergeometric distribution</name>
            <type>discrete</type>
            <type>finite</type>
            <type>positive</type>
            <model>If \({x \choose 2} \ll W\) and \({b \choose 2} \ll B\) then \(X\) can be
                approximated as a negative binomial random variable with parameters \(r = b\) and
                \(p = \frac{W}{W+B}\). This approximation simplifies the distribution by treating it as
                a system with replacement for large values of \(W\) and \(B\).</model>
            <parameter>\(W \in \{1,2,...\}\)</parameter>
            <parameter>\(B \in \{1,2,...\}\)</parameter>
            <parameter>\(b \in \{1,2,...,B\}\)</parameter>
            <support>\(x=\{0,1,...,W\}\)</support>
            <pdf>\( f(x) = \frac{ { x+b-1 \choose x} {W+B-b-x \choose W-x} }{ {W+B \choose W} } \)</pdf>
            <cite>askey2010generalized</cite>
        </distribution>

        <distribution id="power-series">
            <name>power series distribution</name>
            <type>discrete</type>
            <type>infinite</type>
            <type>positive</type>
            <pdf>\( f(x; c; A(c)) = a(x) c^x / A(c). (x=(0,1,...), c>0, A(c)=\sum_{x}a(x) c^x) \! \)</pdf>
            <cite>yanushauskas1980double</cite>
        </distribution>

        <distribution id="beta-Pascal">
            <name>beta-Pascal distribution</name>
            <type>discrete</type>
            <type>infinite</type>
            <type>positive</type>
            <pdf>\( f(x; a, b, n) = \binom{n-1+x}{x} \frac{B(n+a, b+x)}{B(a,b)}. (x=(0,1,...); a+b=n) \! \)</pdf>
            <cite>johnson2005univariate</cite>
        </distribution>

        <distribution id="gamma-Poisson">
            <name>gamma-Poisson distribution</name>
            <type>discrete</type>
            <type>infinite</type>
            <type>positive</type>
            <support>\(\{k, k+1, \ldots\}\)</support>
            <pdf>\(f(x) = {x-1 \choose k-1} p^x (1 - p)^{x-k}, \; x \in \{k, k+1, \ldots\}\)</pdf>
            <mode>\(\lfloor 1 + \frac{k-1}{p}\rfloor\)</mode>
            <cdf>\(F(x) = \sum_{j=k}^x f(j) , \; x \in \{k, k+1, \ldots\}\) where \(f\) is the probability density function</cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <pgf>\(G(t) = \left[\frac{p t}{1 - (1-p) t}\right]^k, \; t \in (-\frac{1}{1-p}, \frac{1}{1-p})\)</pgf>
            <mgf>\(M(t) = \left[\frac{p e^t}{1 - (1-p) e^t}\right]^k, \; t \in (-\infty, -\ln(1 - p))\)</mgf>
            <cf>\(\varphi(t) = \left[\frac{p e^{i t}}{1 - (1-p) e^{i t}}\right]^k, \; t\in (-\infty, \infty)\)</cf>
            <mean>\(k \frac{1}{p}\)</mean>
            <variance>\(k \frac{1-p}{p^2}\)</variance>
            <skew>\(\frac{2-p}{\sqrt{k (1-p)}}\)</skew>
            <kurt>\(\frac{1}{k} \left[6 + \frac{p^2}{1 - p}\right]\)</kurt>
            <median>\(Q(\frac{1}{2})\) where \(Q\) is the quantile function</median>
            <q1>\(Q(\frac{1}{4})\) where \(Q\) is the quantile function</q1>
            <q3>\(Q(\frac{3}{4})\) where \(Q\) is the quantile function</q3>
            <history>The alternative name Pascal distribution is in honor of Blaise Pascal who used the distribution in his solution to the Problem of Points.</history>
            <cite>el2006negative</cite>
        </distribution>

        <distribution id="Polya">
            <name>Polya distribution</name>
            <type>discrete</type>
            <type>infinite</type>
            <type>positive</type>
            <support>\(\{k, k+1, \ldots\}\)</support>
            <pdf>\(f(x) = {x-1 \choose k-1} p^x (1 - p)^{x-k}, \; x \in \{k, k+1, \ldots\}\)</pdf>
            <mode>\(\lfloor 1 + \frac{k-1}{p}\rfloor\)</mode>
            <cdf>\(F(x) = \sum_{j=k}^x f(j) , \; x \in \{k, k+1, \ldots\}\) where \(f\) is the probability density function</cdf>
            <qf>\(Q(p) = F^{-1}(p), \; p \in (0, 1)\) where \(F\) is the distribution function</qf>
            <pgf>\(G(t) = \left[\frac{p t}{1 - (1-p) t}\right]^k, \; t \in (-\frac{1}{1-p}, \frac{1}{1-p})\)</pgf>
            <mgf>\(M(t) = \left[\frac{p e^t}{1 - (1-p) e^t}\right]^k, \; t \in (-\infty, -\ln(1 - p))\)</mgf>
            <cf>\(\varphi(t) = \left[\frac{p e^{i t}}{1 - (1-p) e^{i t}}\right]^k, \; t\in (-\infty, \infty)\)</cf>
            <mean>\(k \frac{1}{p}\)</mean>
            <variance>\(k \frac{1-p}{p^2}\)</variance>
            <skew>\(\frac{2-p}{\sqrt{k (1-p)}}\)</skew>
            <kurt>\(\frac{1}{k} \left[6 + \frac{p^2}{1 - p}\right]\)</kurt>
            <median>\(Q(\frac{1}{2})\) where \(Q\) is the quantile function</median>
            <q1>\(Q(\frac{1}{4})\) where \(Q\) is the quantile function</q1>
            <q3>\(Q(\frac{3}{4})\) where \(Q\) is the quantile function</q3>
            <history>The alternative name Pascal distribution is in honor of Blaise Pascal who used the distribution in his solution to the Problem of Points.</history>
            <cite>el2006negative</cite>
        </distribution>

        <distribution id="gamma-normal">
            <name>gamma-normal distribution</name>
            <type>bivariate</type>
            <type>continuous</type>
            <type>infinite</type>
            <pdf>\( f(x,\tau|\mu,\lambda,\alpha,\beta) = \frac{\beta^\alpha \sqrt{\lambda}}{\Gamma(\alpha)\sqrt{2\pi}}  \, \tau^{\alpha-\frac{1}{2}}\,e^{-\beta\tau}\,e^{ -\frac{ \lambda \tau (x- \mu)^2}{2}} \)</pdf>
            <mean>\(\operatorname{E}(X)=\mu\,\! ,\quad \operatorname{E}(\tau)= \alpha \beta^{-1}\)</mean>
            <variance>\(\operatorname{var}(X)= \frac{\beta}{\lambda (\alpha-1)} ,\quad
                \operatorname{var}(\tau)=\alpha \beta^{-2}\)</variance>
            <cite>bernardo2001bayesian</cite>
        </distribution>

        <distribution id="discrete-Weibull">
            <name>discrete Weibull distribution</name>
            <type>discrete</type>
            <type>infinite</type>
            <type>positive</type>
            <pdf>\( f(x; p, \beta) = (1-p)^{x^\beta}-(1-p)^{(x+1)^\beta}. (x=\{0,1,...\}) \! \)</pdf>
            <cite>englehardt2012methods</cite>
        </distribution>

        <distribution id="noncentral-beta">
            <name>noncentral beta distribution</name>
            <type>continuous</type>
            <type>positive</type>
            <pdf>\( f(x; \beta, \gamma, \delta) = \sum_{i=0}^{\infty}\frac{\Gamma(i+\beta+\gamma)}{\Gamma(\gamma) \Gamma(i+\beta)} \frac{\exp(-\delta/2)}{i!} (\delta/2)^i x^{i+\beta-1} (1-x)^{\gamma-1}. (0 \leq x \leq 1) \! \)</pdf>
            <cdf>\(F(x) = \sum_{j=0}^\infty \frac{1}{j!}\left(\frac{\delta}{2}\right)^je^{-\delta/2}I_x(\beta+j,\gamma)\) where \(I_x\) is the regularized incomplete beta function</cdf>
            <cite>abramowitz1972handbook</cite>
        </distribution>

        <distribution id="arctangent">
            <name>arctangent distribution</name>
            <type>continuous</type>
            <type>infinite</type>
            <type>positive</type>
            <pdf>\( f(x; \lambda, \phi)= \frac{\lambda}{[\arctan(\lambda \phi)+\pi/2]
                [1+\lambda^2 (x - \phi)^2]} (x \geq 0, -\infty
                \lt \lambda \lt \infty) \! \)</pdf>
            <cite>pollastri2004some</cite>
        </distribution>

        <distribution id="log-gamma">
            <name>log-gamma distribution</name>
            <type>continuous</type>
            <type>infinite</type>
            <pdf>\( f(x)=[1/ \alpha^\beta \Gamma(\beta)]e^{\beta x}e^{-e^x/\alpha}\),
                where \((-\infty \lt x \lt \infty) \! \)</pdf>
            <cite>demirhan2011multivariate</cite>
        </distribution>

    </distributions>

    <relations>
        <relation id="Bernoulli/binomial">
            <from>Bernoulli distribution</from>
            <to>binomial distribution</to>
            <statement>If \((X_1, X_2, \ldots, X_n)\) is a sequence of independent Bernoulli variables, each with parameter \(p \in [0, 1]\) then \(Y = \sum_{i = 1}^n X_i\) has the binomial distribution with parameters \(n\) and \(p\).</statement>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Bernoulli/geometric">
            <from>Bernoulli distribution</from>
            <to>geometric distribution</to>
            <statement>If \((X_1, X_2, \ldots)\) is a sequence of independent Bernoulli variables, each with parameter \(p \in (0, 1)\), then \(Y = \min\{n \in \{1, 2, \ldots\}: X_n = 1\}\) has the geometric distribution with parameter \(p\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Bernoulli/negative-binomial">
            <from>Bernoulli distribution</from>
            <to>negative binomial distribution</to>
            <statement>If \((X_1, X_2, \ldots)\) is a sequence of independent Bernoulli variables,
                each with parameter \(p \in (0, 1)\),
                then for \(k \in \{1, 2, \ldots\}\), \(Y = \min\{n \in \{1, 2, \ldots\}:
                \sum_{i=1}^n X_i = k\}\) has the negative binomial distribution with parameters \(k\) and \(p\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Bernoulli/Rademacher">
            <from>Bernoulli distribution</from>
            <to>Rademacher distribution</to>
            <statement>If \(X\) has the Bernoulli distribution with parameter \(\frac{1}{2}\) then \(2 X - 1\) has the Rademacher distribution.</statement>
            <type>linear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="beta/arcsine">
            <from>beta distribution</from>
            <to>arcsine distribution</to>
            <statement>The beta distribution with parameters \(\alpha = \frac{1}{2}\) and \(\beta = \frac{1}{2}\) is the arcsine distribution.</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="beta/continuous-uniform">
            <from>beta distribution</from>
            <to>continuous uniform distribution</to>
            <statement>The beta distribution with parameters \(\alpha = 1\) and \(\beta = 1\) is the (standard) continuous uniform distribution.
            		Also, the k-th order statistic from a sample of n independent continuous uniform variables 
            		is Beta(k,n+1-k).</statement>
            <type>special case</type>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

		<relation id="beta general/beta">
            <from>beta general distribution</from>
            <to>beta distribution</to>
            <statement>If \(X\) has the beta general distribution with parameters \(\alpha \in (0, \infty)\), \(\beta \in (0, \infty)\) and \(L \lt R\), then \(Y = \frac{X-L}{R-L}\) has beta distribution with parameters \(\alpha\) and \(\beta\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>
        
        <relation id="beta/inverse-beta">
            <from>beta distribution</from>
            <to>inverse beta distribution</to>
            <statement>If \(X\) has the beta distribution with parameters \(\alpha \in (0, \infty)\) and \(\beta \in (0, \infty)\), then \(Y = \frac{X}{1 - X}\) has the inverse beta distribution with parameters \(\alpha\) and \(\beta\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="beta/semicircle">
            <from>beta distribution</from>
            <to>semicircle distribution</to>
            <statement>If \(X\) has the beta distribution with parameters \(\alpha = \frac{3}{2}\) and \(\beta = \frac{3}{2}\), and \(r \in (0, \infty)\), then \(Y = r (2 X - 1)\) has the semicircle distribution with parameter \(r\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="beta/beta">
            <from>beta distribution</from>
            <to>beta distribution</to>
            <statement>If \(X\) has the beta distribution with parameters \(\alpha \in (0, \infty)\) and \(\beta \in (0, \infty)\) then \(Y = 1 - X\) has the beta distribution with parameters \(\beta\) and \(\alpha\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="beta/beta.2">
            <from>beta distribution</from>
            <to>beta distribution</to>
            <statement>If \(X\) has the beta distribution with parameters \(\alpha \in (0, \infty)\) and \(\beta = 1\), and \(r \in (0, \infty)\), then \(Y = X^r\) has the beta distribution with parameters \(\frac{\alpha}{r}\) and \(1\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="beta/Pareto">
            <from>beta distribution</from>
            <to>Pareto distribution</to>
            <statement>If \(X\) has the beta distribution with left parameter \(\alpha \in (0, \infty)\) and right parameter \(1\), then \(Y = \frac{1}{X}\) has the Pareto distribution with shape parameter \(\alpha\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="beta/binomial/beta-binomial">
            <from>beta distribution</from>
            <from>binomial distribution</from>
            <to>beta-binomial distribution</to>
            <statement>If \(P\) has the beta distribution with parameters \(\alpha \in (0, \infty)\) and \(\beta \in (0, \infty)\) and if the conditional distribution of \(X\) given \(P = p\) has the binomial distribution with parameters \(n \in \{1, 2, \ldots\}\) and \(p\), then \(X\) has the beta-binomial distribution with parameters \(n\), \(\alpha\), and \(\beta\).</statement>
            <type>conditioning</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="beta-binomial/continuous uniform">
            <from>beta-binomial distribution</from>
            <to>continuous uniform distribution</to>
            <statement>The standard uniform distribution is a special case of beta-binomial distribution with \( a = b = 1 \) </statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="beta-binomial/negative hypergeometric">
            <from>beta-binomial distribution</from>
            <to>negative hypergeometric distribution</to>
            <statement>The negative hypergeometric distribution is a special case of beta-binomial distribution with \( n=n_1, a=n_2, b=n_3 \) </statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="binomial/normal">
            <from>binomial distribution</from>
            <to>standard normal distribution</to>
            <statement>If \(X_n\) has the binomial distribution with parameters \(n \in \{1, 2, \ldots\}\) and fixed \(p \in (0, 1)\) then the distribution of \(Z_n = \frac{X_n - n p}{\sqrt{n p (1 - p)}}\) converges to the standard normal distribution as \(n \to \infty\).</statement>
            <type>central limit theorem</type>
            <cite>dinov2008central</cite>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="binomial/Bernoulli">
            <from>binomial distribution</from>
            <to>Bernoulli distribution</to>
            <statement>The binomial distribution with parameters \(n = 1\) and \(p \in [0, 1]\) is the Bernoulli distribution with parameter \(p\).
            		Also, if \((X_1, X_2, \ldots, X_n)\) is a sequence of independent Bernoulli variables, each with parameter \(p \in [0, 1]\) then \(Y = \sum_{i = 1}^n X_i\) has the binomial distribution with parameters \(n\) and \(p\).
           	</statement>
            <type>special case</type>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="binomial/binomial">
            <from>binomial distribution</from>
            <to>binomial distribution</to>
            <statement>If \(X\) has the binomial distribution with parameters \(n \in \{1, 2, \ldots\}\) and \(p \in [0, 1]\); \(Y\) has the binomial distribution with parameters \(m \in \{1, 2, \ldots\}\) and \(p\); and \(X\) and \(Y\) are independent, then \(X + Y\) has the binomial distribution with parameters \(m + n\) and \(p\).</statement>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="binomial/hypergeometric">
            <from>binomial distribution</from>
            <to>hypergeometric distribution</to>
            <statement>Suppose that \(\boldsymbol{X} = (X_1, X_2, \ldots)\) is a Bernoulli trials sequence with parameter \(p \in (0, 1)\). For \(n \in \{1, 2, \ldots\}\) let \(Y_n = \sum_{i=1}^n X_i\), so that \(Y_n\) has the binomial distribution with parameters \(n\) and \(p\). If \(m \lt n\) then the distribution of \(Y_m\) given \(Y_n = k\) is hypergeometric with parameters \(m\), \(n\), and \(k\).</statement>
            <type>conditional distribution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="binomial/Poisson">
            <from>binomial distribution</from>
            <to>Poisson distribution</to>
            <statement>The binomial distribution with parameters \(n \in \{1, 2, \ldots\}\) and \(p \in (0, 1)\) converges to the Poisson distribution with parameter \(\lambda \in (0, \infty)\) if \(n \to \infty\), \(p \to 0\), with \(n p \to \lambda\).</statement>
            <type>parameter limit</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="binomial/negative-binomial">
            <from>binomial distribution</from>
            <to>negative binomial distribution</to>
            <statement>For \(n \in \{1, 2, \ldots\}\), let \(Y_n\) denote the number of successes in the first \(n\) of a sequence of Bernoulli trials, so that \(Y_n\) has the binomial distribution with trial parameter \(n\) and success parameter \(p\). Then for \(k \in \{1, 2, \ldots\}\), \(Z_k = \min\{n: Y_n \geq k\} - k\) has the negative binomial distribution with stopping parameter \(k\) and success parameter \(p\).</statement>
            <type>inverse stochastic process</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Cauchy/Cauchy.1">
            <from>Cauchy Distribution</from>
            <to>Cauchy Distribution</to>
            <statement>If \(X\) has the Cauchy distribution with location parameter \(\alpha_1 \in (-\infty, \infty)\) and scale parameter \(\beta_1 \in (0, \infty)\), \(Y\) has the Cauchy distribution with location parameter \(\alpha_2 \in (-\infty, \infty)\) and scale parameter \(\beta_2 \in (0, \infty)\), and \(X\) and \(Y\) are independent, then \(X + Y\) has the Cauchy distribution with location parameter \(\alpha_1 + \alpha_2\) and scale parameter \(\beta_1 + \beta_2\).</statement>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>
	
		<relation id="Cauchy/normal.1">
			<from>Cauchy Distribution</from>
			<to>normal Distribution</to>
			<statement>If \(Z_1\) and \(Z_2\) are two independent standard normal variables, then the ratio \(\frac{Z_1}{Z_2}\)
				has the standard Cauchy distribution.</statement>
			<type>transformation</type>
			<cite>MR1326603</cite>
		</relation>
		
		<relation id="Cauchy/normal">
			<from>Cauchy Distribution</from>
			<to>normal Distribution</to>
			<statement>If \(Z_1\) and \(Z_2\) are two independent normally distributed random 
				variables \(\sim N(0, \sigma^2)\), then the ratio \(\frac{Z_1}{Z_2}\)
				has the standard Cauchy distribution.</statement>
			<type>transformation</type>
			<cite>MR1326603</cite>
		</relation>
		
        <relation id="Cauchy/Cauchy.2">
            <from>Cauchy distribution </from>
            <to>Cauchy distribution</to>
            <statement>If \(X\) has Cauchy distribution with location parameter \(\alpha \in (-\infty, \infty)\) and scale parameter \(\beta \in (0, \infty)\), \(a \in (-\infty, \infty)\) and \(b \in (0, \infty)\), then \(a + b X\) has the Cauchy distribution with location parameter \(a + b \alpha\) and scale parameter \(\beta b\).</statement>
            <type>location-scale transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="chi-square/chi-square">
            <from>chi-square distribution</from>
            <to>chi-square distribution</to>
            <statement>If \(X\) has the chi-square distribution with \(m \in (0, \infty)\) degrees of freedom; \(Y\) has the chi-square distribution with \(n \in (0, \infty)\) degrees of freedom; and \(X\) and \(Y\) are independent, then \(X + Y\) has the chi-square distribution with \(m + n\) degrees of freedom.</statement>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="chi-square/gamma">
            <from>chi-square distribution</from>
            <to>gamma distribution</to>
            <statement>If \(X\) has a chi-square distribution with \(\nu \in \{1, 2, \ldots\}\) degrees of freedom, and \(c \in (0, \infty)\) , then \(Y = c X\) has the gamma distribution with shape parameter \(k = \frac{\nu}{2}\) and scale parameter \(\theta = 2 c\).</statement>
            <type>scale transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="chi-square/normal">
            <from>chi-square distribution</from>
            <to>normal distribution</to>
            <statement>If \(X_n\) has the chi-square distribution with \(n \in \{1, 2, \ldots\}\) degrees of freedom, then the distribution of \(Z = \frac{X_n - n}{\sqrt{2 n}}\) converges to the standard normal distribution as \(n \to \infty\).</statement>
            <type>central limit theorem</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
            <cite>dinov2008central</cite>
        </relation>

        <relation id="chi-square/F">
            <from>chi-square distribution</from>
            <to>F-distribution</to>
            <statement>If \(U\) has the chi-square distribution with \(m \in \{1, 2, \ldots\}\) degrees of freedom; \(V\) has the chi-square distribution with \(n \in \{1, 2, \ldots\}\) degrees of freedom; and \(U\) and \(V\) are independent, then \(X = \frac{U/m}{V/n}\) has the \(F\)-distribution with \(m\) degrees of freedom in the numerator and \(n\) degrees of freedom in the denominator.</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="chi-square-noncentral/chi-squre">
            <from>non-central chi-square distribution</from>
            <to>chi-square distribution</to>
            <statement>If \(X\) has the non-central chi-square distribution with \(\nu \in \{1, 2, \ldots\}\) degrees of freedom and non-centrality parameter \(\lambda = 0\), then \(X\) has a chi-square distribution with \(\nu\) degrees of freedom.</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="chi-square/chi">
            <from>chi-square distribution</from>
            <to>chi distribution</to>
            <statement>If \(X\) has the chi-square distribution with \(n \in \{1, 2, \ldots\}\) degrees of freedom, then \(\sqrt{X}\) has the chi distribution with \(n\) degrees of freedom.</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="chi-square/normal/student">
            <from>chi-square distribution</from>
            <from>normal distribution</from>
            <to>Students t distribution</to>
            <statement>If \(Z\) has the standard normal distribution, \(V\) has the chi-square distribution with \(n \in (0, \infty)\) degrees of freedom, and \(Z\) and \(V\) are independent, then \(T = \frac{Z}{\sqrt{V / n}}\) has the Student's \(t\)-distribution with \(n\) degrees of freedom.</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="chi-square/Poisson/Rice">
            <from>chi-square distribution</from>
            <from>Poisson distribution</from>
            <to>Rice distribution</to>
            <statement>If \(X\) has the Poisson distribution with parameter \(\frac{\nu^2}{2 \sigma^2}\) where \(\nu \in (0, \infty)\) and \(\sigma \in (0, \infty)\), and the conditional distribution of \(Y\) given \(X = x \in \{0, 1, 2, \ldots\}\) is chi-square with \(2 x + 2\) degrees of freedom, then \(\sigma \sqrt{X}\) has the Rice distribution with distance parameter \(\nu\) and scale parameter \(\sigma\).</statement>
            <type>mixture and transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/continuous uniform">
            <from>continuous uniform distribution</from>
            <to>continuous uniform distribution</to>
            <statement>If \(X\) is uniformly distributed on the interval \([a, b]\) and \(c, d \in (-\infty, \infty)\) with \(c \ne 0\), then \(Y = cX + d\) is uniformly distributed on \([ca + d, cb + d]\) if \(c \gt 0\) or on \([cb + d, ca + d]\) if \(c \lt 0\)</statement>
            <type>linear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/standard-uniform">
            <from>continuous uniform distribution</from>
            <to>standard uniform distribution</to>
            <statement>The continuous uniform distribution on \([0, 1]\) is the standard uniform distribution</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/triangular">
            <from>continuous uniform distribution</from>
            <to>triangular distribution</to>
            <statement>If \(X\) and \(Y\) are independent and each is uniformly distributed on the interval \([a, b]\), then \(X + Y\) has the triangular distribution with parameters \(a\), \(b\), and \(c = \frac{a+b}{2}\).</statement>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/exponential">
            <from>continuous uniform distribution</from>
            <to>exponential distribution</to>
            <statement>If \(X\) has the standard uniform distribution and \(\beta \in (0, \infty)\), then \(-\beta \ln(1 - X)\) has the exponential distribution with scale parameter \(\beta\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/Pareto">
            <from>continuous uniform distribution</from>
            <to>Pareto distribution</to>
            <statement>If \(X\) has the standard uniform distribution, \(\mu \in (-\infty, \infty)\), and \(\beta \in (0, \infty)\) then \(\frac{\mu}{(1 - X)^{1/\beta}}\) has the Pareto distribution with location parameter \(\mu\) and shape parameter \(\beta\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/beta">
            <from>continuous uniform distribution</from>
            <to>beta distribution</to>
            <statement>If \(X\) has the standard uniform distribution and \(\alpha \in (0, \infty)\) then \(X^{1/\alpha}\) has the beta distribution with left parameter \(\alpha\) and right parameter 1.</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/Cauchy">
            <from>continuous uniform distribution</from>
            <to>Cauchy distribution</to>
            <statement>If \(X\) has the standard uniform distribution then \(\tan[\pi(X - \frac{1}{2})]\) has the standard Cauchy distribution.</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/arcsine">
            <from>continuous uniform distribution</from>
            <to>arcsine distribution</to>
            <statement>If \(X\) has the standard uniform distribution then \(\sin^2(\frac{\pi}{2} X)\) has the arcsine distribution.</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/exponential-logarithmic">
            <from>continuous uniform distribution</from>
            <to>exponential-logarithmic distribution</to>
            <statement>If \(X\) has the standard uniform distribution, \(b \in (0, \infty)\), and \(p \in (0, 1)\) then \(\frac{1}{b}\ln\left(\frac{1 - p}{1 - p^{1 - X}}\right)\) has the exponential-logarithmic distribution with parameters \(b\) and \(p\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/geometric">
            <from>continuous uniform distribution</from>
            <to>geometric distribution</to>
            <statement>If \(X\) has the standard uniform distribution and \(p \in (0, 1)\) then \(\lceil \frac{\ln(1 - X)}{\ln(1 - p)}\rceil\) has the geometric distribution with parameter \(p\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/Gumbel">
            <from>continuous uniform distribution</from>
            <to>Gumbel distribution</to>
            <statement>If \(X\) has the standard uniform distribution, \(\mu \in (-\infty, \infty)\), and \(\sigma \in (0, \infty)\) then \(\mu - \sigma \ln(-\ln(X))\) has the Gumbel distribution with location parameter \(\mu\) and scale parameter \(\sigma\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/hyperbolic-secant">
            <from>continuous uniform distribution</from>
            <to>hyperbolic secant distribution</to>
            <statement>If \(X\) has the standard uniform distribution then \(\frac{2}{\pi} \ln[\tan(\frac{\pi}{2} X)]\) has the hyperbolic secant distribution.</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/Laplace">
            <from>continuous uniform distribution</from>
            <to>Laplace distribution</to>
            <statement>If \(X\) has the standard uniform distribution, \(\mu \in (-\infty, \infty)\), \(b \in (0, \infty)\), then \(\mu + b \ln(2 \min\{X, 1 - X\})\) has the Laplace distribution with location parameter \(\mu\) and scale parameter \(b\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/logistic">
            <from>continuous uniform distribution</from>
            <to>logistic distribution</to>
            <statement>If \(X\) has the standard uniform distribution, \(\mu \in (-\infty, \infty)\), and \(\sigma \in (0, \infty)\), then \(\mu + \sigma \ln\left(\frac{X}{1 - X}\right)\) has the logistic distribution with location parameter \(\mu\) and scale parameter \(\sigma\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/log-logistic">
            <from>continuous uniform distribution</from>
            <to>log-logistic distribution</to>
            <statement>If \(X\) has the standard uniform distribution, \(\alpha \in (0, \infty)\), and \(\beta \in (0, \infty)\), then \(\alpha \left(\frac{X}{1 - X}\right)^{1/\beta}\) has the log-logistic distribution with scale parameter \(\alpha\) and shape parameter \(\beta\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/Rayleigh">
            <from>continuous uniform distribution</from>
            <to>Rayleigh distribution</to>
            <statement>If \(X\) has the standard uniform distribution and \(\sigma \in (0, \infty)\), then \(\sigma \sqrt{-2 \ln(1 - X)}\) has the Rayleigh distribution with scale parameter \(\sigma\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/Weibull">
            <from>continuous uniform distribution</from>
            <to>Weibull distribution</to>
            <statement>If \(X\) has the standard uniform distribution, \(\sigma \in (0, \infty)\) and \(\alpha \in (0, \infty)\), then \(\sigma (-\ln(1 - X))^{1/\alpha}\) has the Weibull distribution with shape parameter \(\alpha\) and scale parameter \(\sigma\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/Irwin-Hall">
            <from>continuous uniform distribution</from>
            <to>Irwin-Hall distribution</to>
            <statement>If \((X_1, X_2, \ldots, X_n)\) is a sequence of independent random variables, each with the standard uniform distribution, then \(\sum_{i=1}^n X_i\) has the Irwin-Hall distribution with parameter \(n\).</statement>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/exponential">
            <from>exponential distribution</from>
            <to>exponential distribution</to>
            <statement>If \(X\) has the exponential distribution with rate parameter \(r \in (0, \infty)\), \(Y\) has the exponential distribution with rate parameter \(s \in (0, \infty)\), and \(X\) and \(Y\) are independent, then \(\min\{X, Y\}\) has the exponential distribution with rate parameter \(r + s\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/continuous-uniform">
            <from>exponential distribution</from>
            <to>continuous uniform distribution</to>
            <statement>If \(X\) has the exponential distribution with parameter \(\lambda \in (0, \infty)\) then \(Y = e^{-\lambda X}\) has the standard uniform distribution.</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/gamma">
            <from>exponential distribution</from>
            <to>gamma distribution</to>
            <statement>If \((X_1, X_2, \ldots, X_n)\) is a sequence of independent random variables, each with the exponential distribution with parameter \(\lambda \in (0, \infty)\) then \(Y = \sum_{i=1}^n X_i\) has the gamma distribution with shape parameter \(n\) and scale parameter \(\frac{1}{\lambda}\).
            		Also, if \(X\) has the gamma distribution with shape parameter \(k = 1\) and scale parameter \(\lambda \in (0, \infty)\), then \(X\) has the exponential distribution with scale parameter \(\lambda\) (and hence rate parameter \(1/\lambda\)).</statement>
            <type>convolution</type>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/Pareto">
            <from>exponential distribution</from>
            <to>Pareto distribution</to>
            <statement>If \(a \in (0, \infty)\) and \(X\) has the exponential distribution with parameter \(\lambda \in (0, \infty)\) then \(Y = a e^X\) has the Pareto distribution with scale parameter \(a\) and shape parameter \(\lambda\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/Weibull">
            <from>exponential distribution</from>
            <to>Weibull distribution</to>
            <statement>If \(X\) has the standard exponential distribution, \(k \in (0, \infty)\), and \(b \in (0, \infty)\), then \(Y = b X^{1/k}\) has the Weibull distribution with shape parameter \(k\) and scale parameter \(b\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/extreme-value">
            <from>exponential distribution</from>
            <to>extreme value distribution</to>
            <statement>If \((X_1, X_2, \ldots)\) is a sequence of independent random variables, each with the standard exponential distribution, then the distribution of \(\max\{X_1, \ldots, X_n\} - \ln(n)\) converges to the standard Gumbel distribution.</statement>
            <type>limiting distribution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/Laplace">
            <from>exponential distribution</from>
            <to>Laplace distribution</to>
            <statement>If \(X\) and \(Y\) are independent random variables and each has the exponential distribution with scale parameter \(\sigma \in (0, \infty)\) then \(X - Y\) has the Laplace distribution with location parameter \(0\) and scale parameter \(\sigma\).</statement>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/Rademacher/Laplace">
            <from>exponential distribution</from>
            <from>Rademacher distribution</from>
            <to>Laplace distribution</to>
            <statement>If \(X\) has the exponential distribution with scale parameter \(\sigma \in (0, \infty)\), \(Y\) has the Rademacher distribution, and \(X\) and \(Y\) are independent, then \(X Y\) has the Laplace distribution with location parameter \(0\) and scale parameter \(\sigma\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/normal/Laplace">
            <from>exponential distribution</from>
            <from>normal distribution</from>
            <to>Laplace distribution</to>
            <statement>If \(X\) has the standard exponential distribution, \(Z\) has the standard normal distribution, \(X\) and \(Z\) are independent, \(\mu \in (-\infty, \infty)\), and \(\sigma \in (0, \infty)\), then \(\mu + \sigma Z \sqrt{2 X}\) has the Laplace distribution with location parameter \(\mu\) and scale parameter \(\sigma\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="F/F">
            <from>F-distribution</from>
            <to>F-distribution</to>
            <statement>If \(X\) has the F-distribution with \(m \in \{1, 2, \ldots\}\) degrees of freedom in the numerator and \(n \in \{1, 2, \ldots\}\) degrees of freedom in the denominator, then \(\frac{1}{X}\) has the F-distribution with \(n\) degrees of freedom in the numerator and \(m\) degrees of freedom in the denominator.</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="F/beta">
            <from>F-distribution</from>
            <to>beta distribution</to>
            <statement>If \(X\) has the F-distribution with \(m \in (0, \infty)\) degrees of freedom in the numerator and \(n \in (0, \infty)\) degrees of freedom in the denominator, then \(\frac{(m/n)X}{1 + (m/n)X}\) has the beta distribution with left parameter \(\frac{m}{2}\) and right parameter \(\frac{n}{2}\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="F/chi-square">
            <from>F-distribution</from>
            <to>chi-square distribution</to>
            <statement>If \(X\) has the F-distribution with \(m \in \{1, 2, \ldots\}\) degrees of freedom in the numerator and \(n \in \{1, 2, \ldots\}\) degrees of freedom in the denominator, then the distribution of \(m X\) converges to the chi-square distribution with \(m\) degrees of freedom as \(n \to \infty\).</statement>
            <type>limiting distribution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="gamma/gamma.1">
            <from>gamma distribution</from>
            <to>gamma distribution</to>
            <statement>If \(X\) has the gamma distribution with shape parameter \(\alpha \in (0, \infty)\) and scale parameter \(\lambda \in (0, \infty)\) and \(c \in (0, \infty)\), then \(Y = cX\) has the gamma distribution with shape parameter \(\alpha\) and scale parameter \(c \lambda\).</statement>
            <type>scale transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="gamma/gamma.2">
            <from>gamma distribution</from>
            <to>gamma distribution</to>
            <statement>If \(X\) has the gamma distribution with shape parameter \(\alpha \in (0, \infty)\) and scale parameter \(\lambda \in (0, \infty)\), \(Y\) has the gamma distribution with shape parameter \(\beta \in (0, \infty)\) and scale parameter \(\lambda\), and \(X\) and \(Y\) are independent, then \(X + Y\) has the gamma distribution with shape parameter \(\alpha + \beta\) and scale parameter \(\lambda\).</statement>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="gamma/exponential">
            <from>gamma distribution</from>
            <to>exponential distribution</to>
            <statement>If \(X\) has the gamma distribution with shape parameter \(k = 1\) and scale parameter \(\lambda \in (0, \infty)\), then \(X\) has the exponential distribution with scale parameter \(\lambda\) (and hence rate parameter \(1/\lambda\)).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="gamma/chi-square">
            <from>gamma distribution</from>
            <to>chi-square distribution</to>
            <statement>If \(X\) has the gamma distribution with shape parameter \(k \in (0, \infty)\) and scale parameter \(\lambda \in (0, \infty)\), then \(\frac{2 X}{\lambda}\) has the chi-square distribution with \(2 k\) degrees of freedom.</statement>
            <type>linear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="gamma/Erlang">
            <from>gamma distribution</from>
            <to>Erlang distribution</to>
            <statement>If \(X\) has the gamma distribution with shape parameter \(k \in \{1, 2, \ldots\}\) and scale parameter \(c \in (0, \infty)\), then \(X\) has the Erlang distribution with shape parameter \(k\) and scale parameter \(c\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="gamma/Maxwell-Boltzmann">
            <from>gamma distribution</from>
            <to>Maxwell-Boltzmann distribution</to>
            <statement>If \(X\) has the gamma distribution with shape parameter \(k = \frac{3}{2}\) and scale parameter \(\theta = 2 a^2\) where \(a \in (0, \infty)\), then \(\sqrt{X}\) has the Maxwell-Boltzmann distribution with parameter \(a\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="gamma/normal">
            <from>gamma distribution</from>
            <to>normal distribution</to>
            <statement>If \(X_k\) has the gamma distribution with shape parameter \(k \in (0, \infty)\) and scale parameter \(b \in (0, \infty)\) then the distribution of \(\frac{X_k - k b}{\sqrt{k} b}\) converges to the standard normal distribution as \(k \to \infty\).</statement>
            <type>central limit theorem</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="gamma/beta">
            <from>gamma distribution</from>
            <to>beta distribution</to>
            <statement>If \(X\) has the gamma distribution with shape parameter \(\alpha \in (0, \infty)\) and scale parameter \(\lambda \in (0, \infty)\), \(Y\) has the gamma distribution with shape parameter \(\beta \in (0, \infty)\) and scale parameter \(\lambda\), and \(X\) and \(Y\) are independent, then \(\frac{X}{X + Y}\) has the beta distribution with left parameter \(\alpha\) and right parameter \(\beta\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="gamma/inverted-beta">
            <from>gamma distribution</from>
            <to>inverted beta distribution</to>
            <statement>If \(X\) has the gamma distribution with shape parameter \(\alpha \in (0, \infty)\) and scale parameter \(\lambda \in (0, \infty)\), \(Y\) has the gamma distribution with shape parameter \(\beta \in (0, \infty)\) and scale parameter \(\lambda\), and \(X\) and \(Y\) are independent, then \(\frac{X}{Y}\) has the inverted beta distribution with shape parameters \(\alpha\) and \(\beta\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="gamma/Levy">
            <from>gamma distribution</from>
            <to>Levy distribution</to>
            <statement>If \(X\) has the gamma distribution with shape parameter \(\frac{1}{2}\) and scale parameter \(\sigma \in (0, \infty)\) then \(\frac{1}{X}\) has the Levy distribution with location parameter \(0\) and scale parameter \(\frac{2}{\sigma}\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="geometric/geometric">
            <from>geometric distribution</from>
            <to>geometric distribution</to>
            <statement>If \(X\) has the geometric distribution on \(\{0, 1, \ldots\}\) then \(X + 1\) has the geometric distribution on \(\{1, 2, \ldots\}\).</statement>
            <type>linear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="geometric/discrete-uniform">
            <from>geometric distribution</from>
            <to>discrete uniform distribution</to>
            <statement>If \(X\) has the geometric distribution on \(\{1, 2, \ldots\}\) with parameter \(p \in (0, 1)\) and \(n \in \{1, 2, \ldots\}\), then the conditional distribution of \(X\) given \(X \in \{1, 2, \ldots, n\}\) converges to the uniform distribution on \(\{1, 2, \ldots, n\}\) as \(p \to 0\).</statement>
            <type>limiting conditional distribution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="geometric/exponential">
            <from>geometric distribution</from>
            <to>exponential distribution</to>
            <statement>If \(X_n\) has the geometric distribution on \(\{1, 2, \ldots\}\) with parameter \(p_n \in (0, 1)\) for each \(n \in \{1, 2, \ldots\}\) and \(n p_n \to r \in (0, \infty)\) as \(n \to \infty\), then the distribution of \(\frac{X_n}{n}\) converges to the exponential distribution with rate parameter \(r\).</statement>
            <type>limiting distribution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
            <cite>dinov2008central</cite>
        </relation>

        <relation id="extreme-value/extreme-value">
            <from>extreme value distribution</from>
            <to>extreme value distribution</to>
            <statement>If \(X\) has the Gumbel distribution with location parameter \(\mu \in (-\infty, \infty)\) and scale parameter \(\sigma \in (0, \infty)\), and \(a \in (-\infty, \infty)\), \(b \in (0, \infty)\), then \(a + b X\) has the Gumbel distribution with location parameter \(a + b \mu\) and scale parameter \(b \sigma\).</statement>
            <type>location-scale transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="extreme-value/continouous-uniform">
            <from>Gumbel distribution</from>
            <to>standard uniform distribution</to>
            <statement>If \(X\) has the Gumbel distribution with location parameter \(\mu \in (-\infty, \infty)\) and scale parameter \(\sigma \in (0, \infty)\) then \(\exp\left[-\exp\left(-\frac{X - \mu}{\sigma}\right)\right]\) has the standard uniform distribution.</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="hypergeometric/hypergeometric">
            <from>hypergeometric distribution</from>
            <to>hypergeometric distribution</to>
            <statement>If \(X\) has the hypergeometric distribution with population size \(m \in \{1, 2, \ldots\}\), sample size \(n \in \{1, 2, \ldots, m\}\) and type parameter \(r \in \{1, 2, \ldots, m\}\) then \(n - X\) has the hypergeometric distribution with population size \(m\), sample size \(n\), and type parameter \(m - r\).</statement>
            <type>linear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="hypergeometric/binomial">
            <from>hypergeometric distribution</from>
            <to>binomial distribution</to>
            <statement>Let \(n \in \{1, 2, \ldots\}\) and \(r_m \in \{1, 2, \ldots, m\}\) for each \(m \in \{1, 2, \ldots\}\) with \(\frac{r_m}{m} \to p \in (0, 1)\) as \(m \to \infty\). The hypergeometric distribution with population size \(m\), sample size \(n\), and type parameter \(r_m\) converges to the binomial distribution with trial parameter \(n\) and success parameter \(p\) as \(m \to \infty\).</statement>
            <type>limiting distribution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="hypergeometric/Bernoulli">
            <from>hypergeometric distribution</from>
            <to>Bernoulli distribution</to>
            <statement>If \(X\) has the hypergeometric distribution with population size \(m \in \{1, 2, \ldots\}\), sample size \(n = 1\), and type parameter \(r \in \{1, 2, \ldots, m\}\), then \(X\) has the Bernoulli distribution with parameter \(\frac{r}{m}\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="hyperbolic-secant/continuous-uniform">
            <from>hyperbolic secant distribution</from>
            <to>continuous uniform distribution</to>
            <statement>If \(X\) has the hyperbolic secant distribution then \(\frac{2}{\pi} \arctan[\exp(\frac{\pi}{2} X)]\) has the standard uniform distribution.</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Irwin-Hall/Irwin-Hall">
            <from>Irwin-Hall distribution</from>
            <to>Irwin-Hall distribution</to>
            <statement>If \(X\) has the Irwin-Hall distribution with parameter \(m \in \{1, 2, \ldots\}\), \(Y\) has the Irwin-Hall distribution with parameter \(n \in \{1, 2, \ldots\}\), and \(X\) and \(Y\) are independent, then \(X + Y\) has the Irwin-Hall distribution with parameter \(m + n\).</statement>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Irwin-Hall/continuous-uniform">
            <from>Irwin-Hall distribution</from>
            <to>standard uniform distribution</to>
            <statement>The Irwin-Hall distribution with parameter \(1\) is the standard uniform distribution.</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Irwin-Hall/triangular">
            <from>Irwin-Hall distribution</from>
            <to>triangular distribution</to>
            <statement>The Irwin-Hall distribution with parameter \(2\) is the triangular distribution with left endpoint \(0\), right endpoint \(1\) and midpoint \(\frac{1}{2}\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="inverted-beta/inverted-beta">
            <from>inverted beta distribution</from>
            <to>inverted beta distribution</to>
            <statement>If \(X\) has the inverted beta distribution with shape parameters \(\alpha \in (0, \infty)\) and \(\beta \in (0, \infty)\) then \(\frac{1}{X}\) has the inverted beta distribution with shape parameters \(\beta\) and \(\alpha\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="inverted-beta/F">
            <from>inverted beta distribution</from>
            <to>F-distribution</to>
            <statement>If \(X\) has the inverted beta distribution with shape parameters \(\alpha \in (0, \infty)\) and \(\beta \in (0, \infty)\) then \(\frac{\beta}{\alpha} X\) has the F-distribution with \(2 \alpha\) degrees of freedom in the numerator and \(2 \beta\) degrees of freedom in the denominator.</statement>
            <type>linear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Laplace/exponential">
            <from>Laplace distribution</from>
            <to>exponential distribution</to>
            <statement>If \(X\) has the Laplace distribution with location parameter \(0\) and scale parameter \(\sigma \in (0, \infty)\) then \(|X|\) has the exponential distribution with scale parameter \(\sigma\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Levy/folded-normal">
            <from>Levy distribution</from>
            <to>folded normal distribution</to>
            <statement>If \(X\) has the Levy distribution with location parameter \(\mu \in (-\infty, \infty)\) and scale parameter \(\sigma \in (0, \infty)\), then \(\frac{1}{\sqrt{X - \mu}}\) has the folded normal distribution with location parameter \(0\) and scale parameter \(\frac{1}{\sigma}\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Levy/gamma">
            <from>Levy distribution</from>
            <to>gamma distribution</to>
            <statement>If \(X\) has the Levy distribution with location parameter \(0\) and scale parameter \(\sigma \in (0, \infty)\), then \(\frac{1}{X}\) has the gamma distribution with shape parameter \(\frac{1}{2}\) and scale parameter \(\frac{2}{\sigma}\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="logarithmic/Poisson">
            <from>logarithmic distribution</from>
            <from>Poisson distribution</from>
            <to>negative binomial distribution</to>
            <statement>If \((X_1, X_2, \ldots)\) is a sequence of independent random variables, each with the logarithmic distribution with parameter \(p \in (0, 1)\) and \(N\) has the Poisson distribution with parameter \(\lambda \in (0, \infty)\), then \(\sum_{i=1}^N X_i\) has the negative binomial distribution with parameters \(\lambda\) and \(p\).</statement>
            <type>mixture</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="logistic/continuous-uniform"> <!-- logistic to standard uniform -->
            <from>logistic distribution</from>
            <to>continuous uniform distribution</to>
            <statement>If \(X\) has the logistic distribution with location parameter \(\mu \in (-\infty, \infty)\) and scale parameter \(\sigma \in (0, \infty)\) then \(\frac{1}{1 + \exp\left(\frac{X - \mu}{\sigma}\right)}\) has the standard uniform distribution.</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="logistic-skew/exponential">
            <from>skew logistic distribution</from>
            <to>exponential distribution</to>
            <statement>If \(X\) has the skew-logistic distribution with parameter \(\alpha \in (0, \infty)\), then \(Y = \ln(1+e^{-X})\) has the exponential distribution with rate parameter \(\alpha\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="log-normal/log-normal.1">
            <from>log-normal distribution </from>
            <to>log-normal distribution</to>
            <statement>If \(X\) has the log-normal distribution with location parameter \(\mu \in (-\infty, \infty)\) and scale parameter \(\sigma \in (0, \infty)\), \(Y\) has the log-normal distribution with location parameter \(\nu \in (-\infty, \infty)\) and scale parameter \(\tau \in (0, \infty)\), and \(X\) and \(Y\) are independent, then \(X Y\) has the log-normal distribution with location parameter \(\mu + \nu\) and scale parameter \(\sqrt{\sigma^2 + \tau^2}\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="log-normal/log-normal.2">
            <from>log-normal distribution</from>
            <to>log-normal distribution</to>
            <statement>If \(X\) has the log-normal distribution with location parameter \(\mu \in (-\infty, \infty)\) and scale parameter \(\sigma \in (0, \infty)\), and \(a \neq 0\) then \(X^a\) has the log-normal distribution with location parameter \(a \mu\) and scale parameter \(|a| \sigma\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="log-normal/log-normal.3">
            <from>log-normal distribution</from>
            <to>log-normal distribution</to>
            <statement>If \(X\) has the log-normal distribution with location parameter \(\mu \in (-\infty, \infty)\) and scale parameter \(\sigma \in (0, \infty)\), and \(a \in (0, \infty)\), then \(a X\) has the log-normal distribution with location parameter \(\ln(a) + \mu\) and scale parameter \(\sigma\).</statement>
            <type>linear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="log-normal/normal">
            <from>log-normal distribution</from>
            <to>normal distribution</to>
            <statement>If \(X\) has the log-normal distribution with location parameter \(\mu \in (-\infty, \infty)\) and scale parameter \(\sigma \in (0, \infty)\), then \(\ln(X)\) has the normal distribution with location parameter \(\mu\) and scale parameter \(\sigma\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Maxwell-Boltzmann/Maxwell-Boltzmann">
            <from>Maxwell-Boltzmann distribution </from>
            <to>Maxwell-Boltzmann distribution</to>
            <statement>If \(X\) has the Maxwell-Boltzmann distribution with scale parameter \(a \in (0, \infty)\) and \(b \in (0, \infty)\), then \(b X\) has the Maxwell-Boltzmann distribution with scale parameter \(a b\).</statement>
            <type>scale transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Maxwell-Boltzmann/chi">
            <from>Maxwell-Boltzmann distribution</from>
            <to>chi distribution</to>
            <statement>If \(X\) has the Maxwell-Boltzmann distribution with scale parameter \(a \in (0, \infty)\), then \(\frac{X}{a}\) has the chi distribution with 3 degrees of freedom.</statement>
            <type>scale transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="negative-binomial/negative-binomial">
            <from>negative binomial distribution</from>
            <to>negative binomial distribution</to>
            <statement>If \(X\) has the negative binomial distribution with stopping parameter \(r \in (0, \infty)\) and success parameter \(p \in (0, 1)\), \(Y\) has the negative binomial distribution with stopping parameter \(s \in (0, \infty)\) and success parameter \(p\), and \(X\) and \(Y\) are independent, then \(X + Y\) has the negative binomial distribution with stopping parameter \(r + s\) and success parameter \(p\).</statement>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="negative-binomial/geometric">
            <from>negative binomial distribution</from>
            <to>geometric distribution</to>
            <statement>The negative binomial distribution with stopping parameter \(1\) and success parameter \(p \in (0, 1)\) is the geometric distribution with success parameter \(p\).
            		Also, if \((X_1, X_2, \ldots, X_n)\) is a sequence of independent random variables, each with the geometric distribution with success parameter \(p\), then \(\sum_{i=1}^n X_i\) has the negative binomial distribution with stopping parameter \(n\) and success parameter \(p\).</statement>
            <type>special case</type>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="negative-binomial/Poisson">
            <from>negative binomial distribution</from>
            <to>Poisson distribution</to>
            <statement>If \(p_r \in (0, 1)\) for each \(r \in (0, \infty)\) and \(r \frac{1 - p_r}{p_r} \to \lambda \in (0, \infty)\) as \(r \to \infty\), then the negative binomial distribution with stopping parameter \(r\) and success parameter \(p_r\) converges to the Poisson distribution with parameter \(\lambda\).</statement>
            <type>limiting distribution with respect to parameter</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>
        
        <relation id="Poisson/exponential">
            <from>Poisson distribution</from>
            <to>exponential distribution</to>
            <statement>If for every t > 0 the number of arrivals in the time interval [0,t] follows
            	the Poisson distribution with mean \(\lambda t\), then the sequence of inter-arrival 
            	times are independent and identically distributed exponential random variables 
				with mean \(\frac{1}{\lambda}\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="negative-binomial/normal">
            <from>negative binomial distribution</from>
            <to>normal distribution</to>
            <statement>If \(X\) has the negative binomial distribution with stopping parameter \(r \in (0, \infty)\) and success parameter \(p \in (0, 1)\), then the distribution of \(\frac{p X - r (1 - p)}{\sqrt{r (1 - p)}}\) converges to the standard normal distribution as \(r \to \infty\).</statement>
            <type>central limit theorem</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="negative-binomial/binomial">
            <from>negative binomial distribution</from>
            <to>binomial distribution</to>
            <statement> For \(k \in \{1, 2, \ldots\}\), let \(Z_k\) denote the number of failures before the \(k\)th success in a sequence of Bernoulli trials with success parameter \(p \in (0, 1)\), so that \(Z_k\) has the negative binomial distribution with stopping parameter \(k\) and success parameter \(p\). Then for \(n \in \{1, 2, \ldots\}\), \(Y_n = \max\{k: k + Z_k \leq n\}\) has the binomial distribution with trial parameter \(n\) and success parameter \(p\).</statement>
            <type>inverse stochastic process</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/log-normal">
            <from>normal distribution</from>
            <to>log-normal distribution</to>
            <statement>If \(X\) has a normal distribution with mean \(\mu \in (-\infty, \infty)\) and variance \(\sigma^2\), then \(Y = e^X\) has the log-normal distribution with parameters \(\mu\) and \(\sigma^2\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/folded-normal">
            <from>normal distribution</from>
            <to>folded normal distribution</to>
            <statement>If \(X\) has the normal distribution with mean \(\mu \in (-\infty, \infty)\) and standard deviation \(\sigma \in (0, \infty)\), then \(|X|\) has the folded normal distribution with parameters \(\mu\) and \(\sigma\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/half-normal">
            <from>normal distribution</from>
            <to>half normal distribution</to>
            <statement>If \(X\) has the normal distribution with mean \(\mu\) = 0 and standard deviation \(\sigma \in (0, \infty)\), then \(|X|\) has a half-normal distribution with parameter \(\sigma\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/noncentral-chi-square">
            <from>normal distribution</from>
            <to>non-central chi-square distribution</to>
            <statement>If \(X\)  has the normal distribution with mean \(\mu \in (-\infty, \infty)\) and standard deviation \(\sigma \in (0, \infty)\), then variable \(Y = \frac{X^2}{\sigma^2}\) has a non-central chi-square distribution with one degree of freedom and non-centrality parameter \(\frac{\mu^2}{\sigma^2}\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/truncated-normal">
            <from>normal distribution</from>
            <to>truncated normal distribution</to>
            <statement>If \(X\) has the normal distribution with mean \(\mu \in (-\infty, \infty)\) and standard deviation \(\sigma \in (0, \infty)\), and if \(a, b \in [-\infty, \infty]\) with \(a \lt b\), then the conditional distribution of \(X\) given  \(X \in (a,b)\) is the truncated normal distribution with location parameter \(\mu\), scale parameter \(\sigma\), minimum value \(a\), and maximum value \(b\).</statement>
            <type>conditioning</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/Levy">
            <from>normal distribution</from>
            <to>Levy distribution</to>
            <statement>If \(X\) has the normal distribution with mean \(\mu \in (-\infty, \infty)\) and standard deviation \(\sigma \in (0, \infty)\), then \(\frac{1}{(X - \mu)^2}\) has the Levy distribution with location parameter 0  and scale parameter \(\frac{1}{\sigma^2}\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/Rice">
            <from>normal distribution</from>
            <to>Rice distribution</to>
            <statement>Let \(\nu \in [0, \infty)\), \(\theta \in (-\infty, \infty)\) and \(\sigma \in (0, \infty)\). If \(X\) has the normal distribution with mean \(\nu \cos(\theta)\) and standard deviation \(\sigma\), \(Y\) has the normal distribution with mean \(\nu \sin(\theta)\) and standard deviation \(\sigma\), and \(X\) and \(Y\) are independent, then \(\sqrt{X^2 + Y^2}\) has the Rice distribution with distance parameter \(\nu\) and scale parameter \(\sigma\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/normal">
            <from>normal distribution</from>
            <to>normal distribution</to>
            <statement>If \(X\) has the normal distribution with mean \(\mu = 0\) and standard deviation \(\sigma = 1\), then \(X\) has a standard normal distribution.</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/chi-square">
            <from>normal distribution</from>
            <to>chi-square distribution</to>
            <statement>If \(X_1, X_2, \ldots, X_n\) are independent standard normal random variables, then \(\sum_{i=1}^n X_i^2\) has the chi-square distribution with \(n\) degrees of freedom.</statement>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/student">
            <from>normal distribution</from>
            <to>Students t distribution</to>
            <statement>If \(X_1, X_2, \ldots, X_n\) are independent normally distributed random variables with mean \(\mu \in (-\infty, \infty)\) and standard deviation \(\sigma \in (0, \infty)\), then \(T = \frac{\overline{X} - \mu}{S / \sqrt{n}}\) has the students t distribution with \(n-1\) degrees of freedom.</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/Maxwell-Boltzmann">
            <from>normal distribution</from>
            <to>Maxwell-Boltzmann distribution</to>
            <statement>If \(X_1\), \(X_2\), and \(X_3\) are independent random variables, each with the normal distribution with mean \(0\) and standard deviation \(a \in (0, \infty)\), then \(\sqrt{X_1^2 + X_2^2 + X_3^2}\) has the Maxwell-Boltzmann distribution with parameter \(a\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/Cauchy">
            <from>normal distribution</from>
            <to>Cauchy distribution</to>
            <statement>If \(X\) and \(Y\) are independent variables, each with the standard normal distribution, then \(\frac{X}{Y}\) has the standard Cauchy distribution.</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Pareto/Exponential">
            <from>Pareto distribution</from>
            <to>exponential distribution</to>
            <statement>If \(X\) has the Pareto distribution with shape parameter \(a \in (0, \infty)\) and scale parameter \(b \in (0, \infty)\), then \(\ln\left(\frac{X}{b}\right)\) has the exponential distribution with rate parameter \(a\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Pareto/Pareto">
            <from>Pareto distribution</from>
            <to>Pareto distribution</to>
            <statement>If \(X\) has the Pareto distribution with shape parameter \(a \in (0, \infty)\) and scale parameter \(b \in (0, \infty)\), and \(c \in (0, \infty)\) then \(c X\) has the Pareto distribution with shape parameter \(a\) and scale parameter \(b c\).</statement>
            <type>scale transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Pareto/beta">
            <from>Pareto distribution</from>
            <to>beta distribution</to>
            <statement>If \(X\) has the Pareto distribution with shape parameter \(a \in (0, \infty)\) and scale parameter \(b \in (0, \infty)\) then \(\frac{b}{X}\) has the beta distribution with left shape parameter \(a\) and right shape parameter \(1\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Pareto/continuous-uniform">
            <from>Pareto distribution</from>
            <to>continuous uniform distribution</to>
            <statement>If \(X\) has the Pareto distribution with shape parameter \(a \in (0, \infty)\) and scale parameter \(b \in (0, \infty)\), then \(1 - \left(\frac{b}{X}\right)^a\) has the standard uniform distribution.</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Poisson/Poisson">
            <from>Poisson distribution</from>
            <to>Poisson distribution</to>
            <statement>If \(X\) has the Poisson distribution with parameter \(\alpha \in (0, \infty)\), \(Y\) has the Poisson distribution with parameter \(\beta \in (0, \infty)\), and \(X\) and \(Y\) are independent, then \(X + Y\) has the Poisson distribution with parameter \(\alpha + \beta\).</statement>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Poisson/normal">
            <from>Poisson distribution</from>
            <to>normal distribution</to>
            <statement>If \(X\) has the Poisson distribution with parameter \(\alpha \in (0, \infty)\), then the distribution of \(\frac{X - \alpha}{\sqrt{\alpha}}\) converges to the standard normal distribution as \(\alpha \to \infty\).</statement>
            <type>central limit theorem</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Poisson/normal.2">
            <from>Poisson distribution</from>
            <to>normal distribution</to>
            <statement>Since the mean and variance of the Poisson distribution are equal (\(\sigma^2 = \mu\)), the Poisson distribution converges to the normal distribution as \(\mu \to \infty\).</statement>
            <type>limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Poisson/binomial">
            <from>Poisson distribution</from>
            <to>binomial distribution</to>
            <statement>If \(\{N_t: t \ge 0\}\) is a Poisson process and if \(s \lt t\), then the conditional distribution of \(N_s\) given \(N_t = n\) is binomial with parameters \(n\) and \(\frac{s}{t}\).</statement>
            <type>conditioning</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Poisson/gamma">
            <from>Poisson distribution</from>
            <to>gamma distribution</to>
            <statement>If \(\{N_t: t \ge 0\}\) is a Poisson process with rate parameter \(\alpha \in (0, \infty)\) and \(n \in \{1, 2, \ldots\}\) then \(T = \min\{t \ge 0: N_t = n\}\) has the gamma distribution with shape parameter \(n\) and scale parameter \(\frac{1}{\alpha}\).</statement>
            <type>stochastic process</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Poisson/logarithmic/negative-binomial">
            <from>Poisson distribution</from>
            <from>logarithmic distribution</from>
            <to>negative binomial distribution</to>
            <statement>If \(\bs{X} =(X_1, X_2, \ldots)\) is a sequence of independent random variables, each with the logarithmic distribution with parameter \(p \in (0, 1)\), \(N\) has the Poisson distribution with parameter \(-r \ln(1 - p)\) where \(r \in (0, \infty)\), and \(N\) and \(\bs{X}\) are independent, then \(\sum_{i=1}^N X_i\) has the negative binomial distribution with stopping parameter \(r\) and success parameter \(p\).</statement>
            <type>compound Poisson transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Poisson/gamma/negative-binomial">
            <from>Poisson distribution</from>
            <from>gamma distribution</from>
            <to>negative binomial distribution</to>
            <statement>If \(\Lambda\) has the gamma distribution with shape parameter \(r \in (0, \infty)\) and scale parameter \(\frac{p}{1-p}\) where \(p \in (0, 1)\), and the conditional distribution of \(X\) given \(\Lambda = \lambda \in (0, \infty)\) is Poisson with parameter \(\lambda\), then \(X\) has the negative binomial distribution with stopping parameter \(r\) and success parameter \(p\).</statement>
            <type>mixture</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Rademacher/Bernoulli">
            <from>Rademacher distribution</from>
            <to>Bernoulli distribution</to>
            <statement>If \(X\) has the Rademacher distribution then \(\frac{X+1}{2}\) has the Bernoulli distribution with success parameter \(\frac{1}{2}\).</statement>
            <type>linear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Rayleigh/chi-square">
            <from>Rayleigh distribution</from>
            <to>chi-square distribution</to>
            <statement>If \(X\) has the Rayleigh distribution with scale parameter \(1\), then \(X^2\) has the chi-square distribution with 2 degrees of freedom.</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Rayleigh/Rayleigh">
            <from>Rayleigh distribution</from>
            <to>Rayleigh distribution</to>
            <statement>If \(X\) has the Rayleigh distribution with scale parameter \(\sigma \in (0, \infty)\) and \(b \in (0, \infty)\), then \(b X\) has the Rayleigh distribution with scale parameter \(b \sigma\).</statement>
            <type>scale transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Rayleigh/gamma">
            <from>Rayleigh distribution</from>
            <to>gamma distribution</to>
            <statement>If \((X_1, X_2, \ldots, X_n)\) is a sequence of independent random variables, each with the Rayleigh distribution with scale parameter \(\sigma \in (0, \infty)\), then \(\sum_{i=1}^n X_i^2\) has the gamma distribution with shape parameter \(n\) and scale parameter \(2 \sigma^2\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Rayleigh/continuous-uniform">
            <from>Rayleigh distribution</from>
            <to>continuous uniform distribution</to>
            <statement>If \(X\) has the Rayleigh distribution with scale parameter \(\sigma \in (0, \infty)\), then \(1 - \exp\left(-\frac{X^2}{2 \sigma^2}\right)\) has the standard uniform distribution.</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Rice/Rayleigh">
            <from>Rice distribution</from>
            <to>Rayleigh distribution</to>
            <statement>The Rice distribution with distance parameter \(0\) and scale parameter \(\sigma \in (0, \infty)\) is the Rayleigh distribution with scale parameter \(\sigma\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Rice/noncentral-chi-square">
            <from>Rice distribution</from>
            <to>noncentral chi-square distribution</to>
            <statement>If \(X\) has the Rice distribution with distance parameter \(\nu \in [0, \infty)\) and scale parameter \(1\), then \(X^2\) has the noncentral chi-square distribution with 2 degrees of freedom and noncentrality parameter \(\nu^2\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="semicircle/continuous-uniform">
            <from>semicircle distribution</from>
            <to>continuous uniform distribution </to>
            <statement>If \(X\) has the semicircle distribution with radius \(r \in (0, \infty)\) then \(\frac{1}{2} + \frac{1}{\pi r^2} X \sqrt{r^2 - X^2} + \frac{1}{\pi} \arcsin\left(\frac{X}{r}\right)\) has the standard uniform distribution</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="stable/Cauchy">
            <from>stable distribution</from>
            <to>Cauchy distribution</to>
            <statement>If \(X\) has a stable distribution with stability parameter \(\alpha = 1\), skewness parameter \(\beta = 0\), location parameter \(\mu \in (-\infty, \infty)\), and scale parameter \(\gamma \in (0, \infty)\), then \(X\) has a Cauchy distribution with scale parameter \(\gamma\) and location parameter \(\mu\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="stable/normal">
            <from>stable Distribution</from>
            <to>normal Distribution</to>
            <statement>If \(X\) has a stable distribution with stability parameter \(\alpha = 2\), location parameter \(\mu \in (-\infty, \infty)\) and scale parameter \(\gamma \in (0, \infty)\), then \(X\) has a normal distribution with mean \(\mu\) and variance \(\sigma^2 = 2 \gamma^2\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="stable/Levy">
            <from>stable Distribution</from>
            <to>Levy Distribution</to>
            <statement>If \(X\) has a stable distribution with stability parameter \(\alpha = \frac{1}{2}\), skewness parameter \(\beta=1\), location parameter \(\mu \in (-\infty, \infty)\) and scale parameter \(\gamma \in (0, \infty)\), then \(X\) has a Levy distribution with scale parameter \(\gamma\) and shift parameter \(\mu\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="stable/Landau">
            <from>stable Distribution</from>
            <to>Landau Distribution</to>
            <statement>If \(X\) has a stable distribution with stability parameter \(\alpha = 1\), skewness parameter \(\beta = 1\), location parameter \(\mu \in (-\infty, \infty)\) and scale parameter \(\gamma \in (0, \infty)\) then \(X\) has a Landau distribution with scale parameter \(\gamma\) and location parameter \(\mu\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="student/F">
            <from>Students t distribution</from>
            <to>F distribution</to>
            <statement>If \(X\) has the students t-distribution with \(n \in \{1, 2, \ldots\}\) degrees of freedom, then \(Y = X^2\) has the F distribution with \(1\) degree of freedom in the numerator and \(n\) degrees of freedom in the denominator.</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="student/Cauchy">
            <from>Students t distribution</from>
            <to>Cauchy distribution</to>
            <statement>The students t-distribution with 1 degree of freedom is the standard Cauchy distribution.</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="U-quadratic/continuous-uniform">
            <from>U-quadratic distribution</from>
            <to>continuous uniform distribution</to>
            <statement>If \(X\) has the U-quadratic distribution with left endpoint \(a \in (-\infty, \infty)\) and right endpoint \(b \in (a, \infty)\) then \(\frac{\alpha}{3} [(X - \beta)^3 + (\beta - a)^3]\) has the standard uniform distribution, where \(\alpha = \frac{12}{(b - a)^3}\) and \(\beta = \frac{a + b}{2}\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="von-Mises/continuous-uniform">
            <from>von Mises distribution</from>
            <to>continuous uniform distribution</to>
            <statement>The von Mises distribution with location parameter \(0\) and shape parameter \(0\) is the uniform distribution on the interval \([-\pi, \pi]\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Wald/Wald.1">
            <from>Wald distribution</from>
            <to>Wald distribution</to>
            <statement>If \(X\) has the Wald distribution with mean \(\mu \in (0, \infty)\) and shape parameter \(\lambda \in (0, \infty)\) and \(t \in (0, \infty)\), then \(t X\) has the Wald distribution with mean \(t \mu\) and shape parameter \(t \lambda\)</statement>
            <type>scale transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Wald/Wald.2">
            <from>Wald distribution</from>
            <to>Wald distribution</to>
            <statement>If \(X\) has the Wald distribution with mean \(\mu a\) and shape parameter \(\lambda a^2\) where \(\mu \in (0, \infty)\), \(\lambda \in (0, \infty)\), and \(a \in (0, \infty)\), and if \(Y\) has the Wald distribution with mean \(\mu b\) and shape parameter \(\lambda b^2\) where \(b \in (0, \infty)\), and if \(X\) and \(Y\) are independent, then \(X + Y\) has the Wald distribution with mean \(\mu(a + b)\) and shape parameter \(\lambda(a + b)^2\).</statement>
            <type>convolution</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Weibull/Weibull">
            <from>Weibull distribution</from>
            <to>Weibull distribution</to>
            <statement>If \(X\) has the Weibull distribution with shape parameter \(k \in (0, \infty)\), scale parameter \(b \in (0, \infty)\), and \(c \in (0, \infty)\), then \(Y = c X\) has the Weibull distribution with shape parameter \(k\) and scale parameter \(b c\).</statement>
            <type>scale transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Weibull/exponential">
            <from>Weibull distribution</from>
            <to>exponential distribution</to>
            <statement>If \(X\) has the Weibull distribution with shape parameter \(k \in (0, \infty)\) and scale parameter \(b \in (0, \infty)\), then \(Y = \left(\frac{X}{b}\right)^k\) has the standard exponential distribution.</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/normal.2">
            <from>normal distribution</from>
            <to>normal distribution</to>
            <statement>The normal distribution with \(\mu=0\) and \(\sigma^2=1\) is called the standard normal</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="student/normal">
            <from>student distribution</from>
            <to>normal distribution</to>
            <statement>As \(n\longrightarrow\infty\), the t-distribution approaches the normal distribution with mean 0 and variance 1</statement>
            <type>limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="F/student">
            <from>F distribution</from>
            <to>student distribution</to>
            <statement>If \(X\) has the F distribution with \(1\) degree of freedom in the numerator and \(n\) degrees of freedom in the denominator, then \(\sqrt{X}\) has the same distribution as the absolute value of a students t variable with \(n\) degrees of freedom.</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="binomial/normal">
            <from>binomial distribution</from>
            <to>normal distribution</to>
            <statement>If n is large enough, then the skew of the distribution is not too great. In this case, if a suitable continuity correction is used, then an excellent approximation to \(B(n, p)\) is given by the normal distribution \(N(np, np(1-p))\) as \(n \rightarrow \infty\)</statement>
            <type>limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Erlang/chi-square">
            <from>Erlang distribution</from>
            <to>chi-square distribution</to>
            <statement>When the scale parameter \(\mu\) equals 2, then the Erlang distribution simplifies to the chi-square distribution with \(2k\) degrees of freedom</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="noncentral-student-t/normal">
            <from>noncentral student t distribution</from>
            <to>normal distribution</to>
            <statement>If \(T\) is noncentral t-distributed with \(\nu\) degrees of freedom and noncentrality parameter \(\mu\) and \(Z=\lim_{\nu\to\infty}T\), then \(Z\) has a normal distribution with mean \(\mu\) and unit variance</statement>
            <type>limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/Pareto">
            <!--same as continuous-uniform/Pareto-->
            <from>continuous uniform distribution</from>
            <to>Pareto distribution</to>
            <statement>If \(X\) has the standard uniform distribution, \(\mu \in (-\infty, \infty)\), and \(\beta \in (0, \infty)\) then \(\frac{\mu}{(1 - X)^{1/\beta}}\) has the Pareto distribution with location parameter \(\mu\) and shape parameter \(\beta\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/exponential">
            <!--same as continuous-uniform/exponential-->
            <from>continuous uniform distribution</from>
            <to>exponential distribution</to>
            <statement>If \(X\) has the standard uniform distribution and \(\beta \in (0, \infty)\), then \(-\beta \ln(1 - X)\) has the exponential distribution with scale parameter \(\beta\).</statement>
            <type>nonlinear transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Zipf/discrete-uniform">
            <from>Zipf distribution</from>
            <to>discrete uniform distribution</to>
            <statement>The discrete uniform distribution is a special case of the Zipf distribution where \(a=0, b=n\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Fisher-Tippett/Gumbel">
            <from>Fisher-Tippett distribution</from>
            <to>Gumbel distribution</to>
            <statement>The Gumbel distribution is a particular case of the Fisher-Tippett distribution where \(\mu=0, \beta=1\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="log-normal/Gibrat's">
            <from>log-normal distribution</from>
            <to>Gibrat's distribution</to>
            <statement>Gibrat's law is a special case of the log-normal distribution where \(\mu=0, \sigma=1\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Cauchy/Cauchy">
            <from>Cauchy distribution</from>
            <to>Cauchy distribution</to>
            <statement>If \(X\) is a standard Cauchy distribution, then \(Y = x_0 + \gamma X\) is a Cauchy distribution</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="multinomial/Binomial">
            <from>multinomial distribution</from>
            <to>Binomial distribution</to>
            <statement>When \(k=2\), the multinomial distribution is the binomial distribution</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="power-series/Pascal">
            <from>power series distribution</from>
            <to>Pascal distribution</to>
            <statement>The power series\((c, A(c))\) distribution becomes a Pascal distribution when \(A(c)=(1-c)^{-n}, c=1-p\)</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="power-series/logarithmic">
            <from>power series distribution</from>
            <to>logarithmic distribution</to>
            <statement>The logarithmic distribution is a special case of the power series distribution with \(A(c)=-\log (1-c)\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Poisson/gamma-Poisson">
            <from>Poisson distribution</from>
            <to>gamma-Poisson distribution</to>
            <statement>Let \(\mu \sim Gamma(\alpha, \beta)\) denote that \(\mu\) is distributed
                according to the Gamma density g parameterized in terms of a shape parameter
                \(\alpha\) and an inverse scale parameter \(\beta\): \(g(\mu \mid \alpha, \beta) =
                \frac{\beta^{\alpha}}{\Gamma(\alpha)} \mu^{\alpha-1} e^{-\beta \mu}, \mu&gt;0\).
                Then, given the same sample of n measured values \(k_i\) as before, and a prior of
                \(Gamma(\alpha, \beta)\), the posterior distribution is
                \(\mu \sim Gamma(\alpha + \sum_{i=1}^n k_i, \beta + n)\).
                The posterior predictive distribution of additional data is a Gamma-Poisson
                distribution.</statement>
            <type>Bayesian</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="gamma-Poisson/Pascal">
            <from>gamma-Poisson distribution</from>
            <to>Pascal distribution</to>
            <statement>The gamma-Poisson distribution becomes a Pascal distribution when \( \alpha=\frac{1-p}{p}, \beta=n \) </statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/beta-binomial">
            <from>continuous uniform distribution</from>
            <to>beta-binomial distribution</to>
            <statement>For \(a = b = 1\), the continuous uniform distribution reduces to the beta-binomial distribution as a special case</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Zipf/zeta">
            <from>Zipf distribution</from>
            <to>zeta distribution</to>
            <statement>The zeta distribution is equivalent to the Zipf distribution for infinite N. </statement>
            <type>limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="power-series/Poisson">
            <from>power series distribution</from>
            <to>Poisson distribution</to>
            <statement>The power series\((c, A(c))\) distribution becomes a Poisson distribution when \(\mu=c, A(c)=e^c\) </statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Pascal/Poisson">
            <from>Pascal distribution</from>
            <to>Poisson distribution</to>
            <statement>Consider a sequence of negative binomial distributions where the stopping
                parameter n goes to infinity, whereas the probability of success in each trial,
                p, goes to zero in such a way as to keep the mean of the distribution constant.
                Denoting this mean \(\mu\), the parameter p will have to be
                \(\mu = n \frac{p}{1-p} \rightarrow p = \frac{\mu}{n+\mu}\). Then \(Poisson(\mu) =
                \lim_{n \to \infty} Pascal(n, \frac{\mu}{\mu+n})\).</statement>
            <type>transformation, limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="negative-hypergeometric/binomial">
            <from>negative hypergeometric distribution</from>
            <to>binomial distribution</to>
            <statement>As \(n_3\to\infty, n_1\to\infty\) and letting \(p=n_1/n_3, n_2=n\), the negative hypergeometric\((n_1, n_2, n_3)\) distribution becomes a binomial\((n, p)\) distribution</statement>
            <type>transformation, limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="negative-hypergeometric/Pascal">
            <from>negative hypergeometric distribution</from>
            <to>Pascal distribution</to>
            <statement>As \(n_3\to\infty\) and \(\frac{n_1}{n_3}\to p\), the negative hypergeometric\((n_1, n_2, n_3)\) distribution becomes a Pascal\((n, p)\) distribution</statement>
            <type>limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="negative-hypergeometric/negative-binomial">
            <from>negative hypergeometric distribution</from>
            <to>negative binomial distribution</to>
            <statement>As \(n_3\to\infty\) and \(\frac{n_1}{n_3}\to p\), the negative hypergeometric\((n_1, n_2, n_3)\) distribution becomes a negative binomial\((n, p)\) distribution</statement>
            <type>limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Pascal/geometric">
            <from>Pascal distribution</from>
            <to>geometric distribution</to>
            <statement>The geometric distribution is a special case of the Pascal distribution where \(n=1\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="discrete-Weibull/geometric">
            <from>discrete Weibull distribution</from>
            <to>geometric distribution</to>
            <statement>The geometric distribution is a particular case of the discrete Weibull distribution where \(\beta=1\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/chi-square">
            <from>normal distribution</from>
            <to>chi-square distribution</to>
            <statement>If \(X_i \sim Normal(\mu, \sigma^2)\), with \(i=1,...,k\) independent
                random variables, then \(\sum_{i=1}^{k} (\frac{X_i-\mu}{\sigma})^2\) is a chi-square
                distribution.</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="normal/gamma-normal">
            <from>normal distribution</from>
            <to>gamma-normal distribution</to>
            <statement>When \(\sigma\) in the normal distribution follows an inverted gamma\((\alpha, \beta)\) distribution, the normal distribution becomes a gamma-normal\((\mu, \alpha, \beta)\) distribution</statement>
            <type>Bayesian</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Wald/normal">
            <from>Wald distribution</from>
            <to>normal distribution</to>
            <statement>As \(\lambda \to \infty\), the Wald distribution becomes more like a standard normal (Gaussian) distribution</statement>
            <type>limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="gamma/log-gamma">
            <from>gamma distribution</from>
            <to>log-gamma distribution</to>
            <statement>If a random variable \(X\) is gamma-distributed with scale \(\alpha\) and shape \(\beta\), then \(Y = \log X\) is log gamma-distributed.</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="generalized-gamma/gamma">
            <from>generalized gamma distribution</from>
            <to>gamma distribution</to>
            <statement>The gamma distribution is a special case of the generalized gamma distribution where \(\gamma=1\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Wald/chi-square">
            <from>Wald distribution</from>
            <to>chi-square distribution</to>
            <statement>If a random variable \(X\) is inverse Gaussian-distributed with mean \(\mu\) and shape parameter \(\lambda\), the \(Y = \lambda(X-\mu)^2/(\mu^2 X)\) has a chi-square distribution</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/chi-square">
            <from>exponential distribution</from>
            <to>chi-square distribution</to>
            <statement>If \(X \sim Exponential(\lambda=1/2)\), then \(X \sim \chi_{2}^{2}\) has a chi-square distribution with 2 degrees of freedom</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="chi-square/Erlang">
            <from>chi-square distribution</from>
            <to>Erlang distribution</to>
            <statement>If \(X \sim \chi^{2}(k)\) with even \(k\), then \(X\) is Erlang distributed with shape parameter \(k/2\) and scale parameter \(1/2\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Cauchy/arctangent">
            <from>Cauchy distribution</from>
            <to>arctangent distribution</to>
            <statement>The derivative of the arctangent function gives the formula of the Cauchy distribution. Therefore, the arctangent is called the Cauchy cumulative distribution</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/hypoexponential">
            <from>exponential distribution</from>
            <to>hypoexponential distribution</to>
            <statement>The hypoexponential distribution is the distribution of a general sum (\(\sum X_i)\) of exponential random variables. Its coefficient of variation is less than one, compared to the exponential distribution, whose coefficient of variation is one</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Makeham/Gompertz">
            <from>Makeham distribution</from>
            <to>Gompertz distribution</to>
            <statement>The Gompertz distribution is a special case of the Makeham distribution where \(\gamma=0\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/F">
            <from>exponential distribution</from>
            <to>F distribution</to>
            <statement>If \(X_1, X_2\) are two independent random variables with exponential distribution with \(\alpha=1\), then \(Y=X_1/X_2\) is an F distribution</statement>
            <type>special case, transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/hyperexponential">
            <from>exponential distribution</from>
            <to>hyperexponential distribution</to>
            <statement>The hyperexponential distribution is the distribution whose density is a weighted sum of exponential densities. Its coefficient of variation is greater than one, compared to the exponential distribution, whose coefficient of variation is one</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="IDB/Exponential">
            <from>IDB distribution</from>
            <to>exponential distribution</to>
            <statement>The exponential distribution is a special case of the IDB distribution where \(\delta=\kappa \to 0\) in the IDB function and \(\alpha=1/ \gamma\) in the exponential function</statement>
            <type>special case, limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Muth/exponential">
            <from>Muth distribution</from>
            <to>exponential distribution</to>
            <statement>The exponential distribution is a particular case of the Muth distribution where \(\alpha=1\) in the exponential function and \(\kappa \to 0\) in the Muth function</statement>
            <type>special case, limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/exponential-power">
            <from>continuous uniform distribution</from>
            <to>exponential power distribution</to>
            <statement>If \(X\) has a standard uniform distribution, then \(Y=[\log(1-\log(1-X))/\lambda]^{1/\kappa}\) has an exponential power distribution with parameters \(\lambda\) and \(\kappa\)</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Laplace/error">
            <from>Laplace distribution</from>
            <to>error distribution</to>
            <statement>The error distribution is a special case of Laplace distribution where \(\alpha_1=\alpha_2\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/triangular">
            <from>continuous uniform distribution</from>
            <to>triangular distribution</to>
            <statement>If \(X_1, X_2\) are two independent random variables with standard uniform distribution, then \(X = X_1-X_2\) is a standard triangular distribution</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/power">
            <from>continuous uniform distribution</from>
            <to>power distribution</to>
            <statement>If \(X\) is an independent random variable with standard uniform distribution, then \(X^{1/\beta}\) is a standard power distribution</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/power">
            <from>continuous uniform distribution</from>
            <to>power distribution</to>
            <statement>If \(X\) is a standard uniform distribution, then the order statistic \(X_{(n)}\) is a standard power distribution</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="IDB/Rayleigh">
            <from>IDB distribution</from>
            <to>Rayleigh distribution</to>
            <statement>The Rayleigh distribution is a special case of the IDB distribution where \(\delta=2/\alpha, \gamma=0\)</statement>
            <type>special case, transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Weibull/Rayleigh">
            <from>Weibull distribution</from>
            <to>Rayleigh distribution</to>
            <statement>The Rayleigh distribution is a special case of the Weibull distribution where \(\beta=2\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="triangular/triangular">
            <from>triangular distribution</from>
            <to>triangular distribution</to>
            <statement>The standard triangular distribution is a special case of the triangular distribution where \(a=-1, b=1, m=0\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Log-Logistic/Lomax">
            <from>log-logistic distribution</from>
            <to>Lomax distribution</to>
            <statement>The Lomax distribution is a special case of the Log-logistic distribution where \(\kappa = 1\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="log-Logistic/logistic">
            <from>log-logistic distribution</from>
            <to>logistic distribution</to>
            <statement>If X has a log-logistic distribution with scale parameter \(\alpha\) and shape parameter \(\beta\) then \(Y = \log(X)\) has a logistic distribution with location parameter \(\log(\alpha)\) and scale parameter \(1 / \beta\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Erlang/exponential">
            <from>Erlang distribution</from>
            <to>exponential distribution</to>
            <statement>The exponential distribution is a special case of the Erlang distribution where \(n=1\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/Erlang">
            <from>exponential distribution</from>
            <to>Erlang distribution</to>
            <statement>If X has the exponential distribution with parameter \(\alpha\) then \(\sum^n X_i\) has the Erlang distribution with parameters \(n, \alpha\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="noncentral-student/student">
            <from>non-central student t distribution</from>
            <to>student t distribution</to>
            <statement>Student's t-distribution is a special case of the noncentral Student's t-distribution where \(\delta=0\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="log-exponential/exponential">
            <from>logistic exponential distribution</from>
            <to>exponential distribution</to>
            <statement>The exponential distribution is a special case of the logistic exponential distribution where \(\beta=1\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/Benford">
            <from>continuous uniform distribution</from>
            <to>Benford distribution</to>
            <statement>If X has the standard uniform distribution then \(\lfloor 10^X \rfloor\) has the Benford distribution.</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="gamma/inverted-gamma">
            <from>gamma distribution</from>
            <to>inverted gamma distribution</to>
            <statement>If X has the gamma distribution then \(\frac{1}{X}\) has the inverted gamma distribution.</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Cauchy/Cauchy">
            <from>Cauchy distribution</from>
            <to>Cauchy distribution</to>
            <statement>The standard Cauchy distribution is a special case of the Cauchy distribution where \(a=0, \alpha=1\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Cauchy/hyperbolic-secant">
            <from>Cauchy distribution</from>
            <to>hyperbolic secant distribution</to>
            <statement>If X has the standard Cauchy distribution then \(\log{\frac{\lvert X \rvert}{\pi}}\) has the hyperbolic secant distribution.</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="power-series/logarithmic">
            <from>power-series distribution</from>
            <to>logarithmic distribution</to>
            <statement>The logarithmic distribution is a particular case of the power-series distribution where \(A(c) = -\log{(1-c)}\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Pascal/beta-Pascal">
            <from>Pascal distribution</from>
            <to>beta-Pascal distribution</to>
            <statement>When the parameter \(p\) in the Pascal distribution follows a beta distribution, the Pascal distribution becomes a beta-Pascal distribution</statement>
            <type>Bayesian</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Polya/binomial">
            <from>Polya distribution</from>
            <to>binomial distribution</to>
            <statement>The binomial distribution is a particular case of the Polya distribution where \(\beta=1\)</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="geometric/Pascal">
            <from>geometric distribution</from>
            <to>Pascal distribution</to>
            <statement>If \(X\) has the geometric distribution with parameter \(p\), then \(\sum\nolimits_{i=1}^n X_i\) has the Pascal distribution with parameters \(n, p\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Pascal/normal">
            <from>Pascal distribution</from>
            <to>normal distribution</to>
            <statement>When \(\mu=n(1-p)\) and \(n\to\infty\) then the Pascal distribution becomes the normal distribution.</statement>
            <type>limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="beta/normal">
            <from>beta distribution</from>
            <to>normal distribution</to>
            <statement>When \(\beta=\gamma\to\infty\) then the beta distribution becomes the normal distribution.</statement>
            <type>limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="generalized-gamma/log-normal">
            <from>generalized gamma distribution</from>
            <to>log-normal distribution</to>
            <statement>When \(\beta\to\infty\) then the generalized gamma distribution becomes the log-normal distribution.</statement>
            <type>limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="chi-square/chi">
            <from>chi-square distribution</from>
            <to>chi distribution</to>
            <statement>If \(X\) has the chi-square distribution with parameter \(n\), then \( \sqrt{X} \) has the chi distribution with parameter \(n\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="chi-square/exponential">
            <from>chi-square distribution</from>
            <to>exponential distribution</to>
            <statement>The exponential distribution is a special case of chi-square distribution with \( n=2 \).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/chi-square">
            <from>exponential distribution</from>
            <to>chi-square distribution</to>
            <statement>The chi-square distribution with \(n=2\) degrees of freedom is a special case of the exponential distribution with \( \alpha=2 \).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="beta/inverted-beta">
            <from>beta distribution</from>
            <to>inverted beta distribution</to>
            <statement>If \(X\) has the beta distribution with parameters \(\beta\) and \(\gamma\), then \( \frac{X}{1-X} \) has the inverted beta distribution with parameters \(\beta\) and \(\gamma\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="hypoexponential/Erlang">
            <from>hypoexponential distribution</from>
            <to>Erlang distribution</to>
            <statement>The Erlang distribution is a special case of hypoexponential distribution with \(\bar{\alpha}=\alpha\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="doubly-noncentral-t/noncentral-t">
            <from>doubly noncentral t distribution</from>
            <to>noncentral t distribution</to>
            <statement>When the denominator noncentrality parameter tends to zero, the doubly noncentral t distribution becomes the noncentral t distribution.</statement>
            <type>limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="noncentral-F/F">
            <from>noncentral f distribution</from>
            <to>f distribution</to>
            <statement>When \(\delta\to0\) then noncentral f distribution becomes f distribution.</statement>
            <type>limiting</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="hyperexponential/exponential">
            <from>hyperexponential distribution</from>
            <to>exponential distribution</to>
            <statement>The exponential distribution is a special case of hyperexponential distribution with \(\bar{\alpha}=\alpha\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="exponential/Rayleigh">
            <from>exponential distribution</from>
            <to>Rayleigh distribution</to>
            <statement>If \(X\) has the exponential distribution with parameter \(\alpha\), then \( \sqrt{X} \) has the Rayleigh distribution with parameter \(\alpha\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/Gompertz">
            <from>continuous uniform distribution</from>
            <to>Gompertz distribution</to>
            <statement>If \(X\) has the standard uniform distribution, then \( \frac{\log\left(1-\frac{(\log{X})(\log{k})}{\delta}\right)}{\log{k}} \) has the Gompertz distribution.</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="error/Laplace">
            <from>error distribution</from>
            <to>Laplace distribution</to>
            <statement>The Laplace distribution is a special case of error distribution with \(a=0\), \(b=\frac{\alpha}{2}\), and \(c=2\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="Laplace/error">
            <from>Laplace distribution</from>
            <to>error distribution</to>
            <statement>The error distribution is a special case of Laplace distribution with \(\alpha_1=\alpha_2\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="continuous-uniform/uniform">
            <from>continuous uniform distribution</from>
            <to>uniform distribution</to>
            <statement>If \(X\) has the standard uniform distribution, then \( a+(b-a)X \) has the uniform distribution with parameters \(a\) and \(b\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="standard-power/continuous-uniform">
            <from>standard power distribution</from>
            <to>continuous uniform distribution</to>
            <statement>The standard uniform distribution is a special case of standard power distribution with \(\beta=1\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="minimax/standard-power">
            <from>minimax distribution</from>
            <to>standard power distribution</to>
            <statement>The standard power distribution is a special case of minimax distribution with \(\gamma=1\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="power/standard-power">
            <from>power distribution</from>
            <to>standard power distribution</to>
            <statement>The standard power distribution is a special case of power distribution with \(\alpha=1\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="generalized-Pareto/Pareto">
            <from>generalized Pareto distribution</from>
            <to>Pareto distribution</to>
            <statement>If \(X\) has the generalized Pareto distribution with parameters \(\delta\), \(k\), \(\gamma=0\), then \( X+\delta \) has the Pareto distribution with parameters \(k\) and \(\lambda\).</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="extreme-value/Weibull">
            <from>extreme-value distribution</from>
            <to>Weibull distribution</to>
            <statement>If \(X\) has the extreme-value distribution, then \( e^X \) has the Weibull distribution with the same parameters.</statement>
            <type>transformation</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="lomax/log-logistic">
            <from>lomax distribution</from>
            <to>log-logistic distribution</to>
            <statement>The log-logistic distribution is a special case of the lomax distribution where \(\kappa = 1\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

        <relation id="TSP/triangular">
            <from>TSP distribution</from>
            <to>triangular distribution</to>
            <statement>The triangular distribution is a special case of the TSP distribution where \(n = 2\).</statement>
            <type>special case</type>
            <cite>doi:10.1080/07408170590948512</cite>
            <cite>doi:10.1198/000313008X270448</cite>
        </relation>

    </relations>
</distributome>
