From c7b2173cbebd075912c6a1c682075df48453f8b3 Mon Sep 17 00:00:00 2001 From: thomassargent30 Date: Mon, 30 Mar 2026 16:24:39 -0400 Subject: [PATCH 01/20] Tom's March 30 edit of a new lecture on Blackwell's theorem --- lectures/_static/quant-econ.bib | 71 ++ lectures/_toc.yml | 1 + lectures/blackwell_kihlstrom.md | 1202 +++++++++++++++++++++++++++++++ 3 files changed, 1274 insertions(+) create mode 100644 lectures/blackwell_kihlstrom.md diff --git a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index 0ba2eddd3..d674fe311 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -3,6 +3,77 @@ Note: Extended Information (like abstracts, doi, url's etc.) can be found in quant-econ-extendedinfo.bib file in _static/ ### + + +@inproceedings{blackwell1951, + author = {Blackwell, David}, + title = {Comparison of Experiments}, + booktitle = {Proceedings of the Second {Berkeley} Symposium on Mathematical + Statistics and Probability}, + editor = {Neyman, Jerzy}, + pages = {93--102}, + year = {1951}, + publisher = {University of California Press}, + address = {Berkeley, CA} +} + +@article{blackwell1953, + author = {Blackwell, David}, + title = {Equivalent Comparisons of Experiments}, + journal = {Annals of Mathematical Statistics}, + volume = {24}, + number = {2}, + pages = {265--272}, + year = {1953}, + doi = {10.1214/aoms/1177729032} +} + +@techreport{bonnenblust1949, + author = {Bohnenblust, H. F. and Shapley, Lloyd S. 
and Sherman, Seymour}, + title = {Reconnaissance in Game Theory}, + institution = {The RAND Corporation}, + number = {RM-208}, + year = {1949}, + address = {Santa Monica, CA}, + note = {Cited for the economic criterion for comparing experiments} +} + +@article{degroot1962, + author = {{DeGroot}, Morris H.}, + title = {Uncertainty, Information, and Sequential Experiments}, + journal = {Annals of Mathematical Statistics}, + volume = {33}, + number = {2}, + pages = {404--419}, + year = {1962}, + doi = {10.1214/aoms/1177704567} +} + +@incollection{kihlstrom1984, + author = {Kihlstrom, Richard E.}, + title = {A {Bayesian} Exposition of {Blackwell}'s Theorem on the + Comparison of Experiments}, + booktitle = {Bayesian Models in Economic Theory}, + editor = {Boyer, Marcel and Kihlstrom, Richard E.}, + series = {Studies in Bayesian Econometrics}, + volume = {5}, + pages = {13--31}, + year = {1984}, + publisher = {North-Holland}, + address = {Amsterdam} +} + +@article{kihlstrom1974a, + author = {Kihlstrom, Richard E.}, + title = {A General Theory of Demand for Information about Product Quality}, + journal = {Journal of Economic Theory}, + volume = {8}, + number = {4}, + pages = {413--439}, + year = {1974}, + doi = {10.1016/0022-0531(74)90003-0} +} + @inproceedings{hansen2004certainty, title={Certainty equivalence and model uncertainty}, author={Hansen, Lars Peter and Sargent, Thomas J}, diff --git a/lectures/_toc.yml b/lectures/_toc.yml index da19f0b03..247a89b34 100644 --- a/lectures/_toc.yml +++ b/lectures/_toc.yml @@ -36,6 +36,7 @@ parts: - file: divergence_measures - file: likelihood_ratio_process - file: likelihood_ratio_process_2 + - file: blackwell_kihlstrom - file: likelihood_var - file: imp_sample - file: wald_friedman diff --git a/lectures/blackwell_kihlstrom.md b/lectures/blackwell_kihlstrom.md new file mode 100644 index 000000000..cdb828854 --- /dev/null +++ b/lectures/blackwell_kihlstrom.md @@ -0,0 +1,1202 @@ +--- +jupytext: + text_representation: + extension: 
.md + format_name: myst + format_version: 0.13 + jupytext_version: 1.16.4 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(blackwell_kihlstrom)= +```{raw} jupyter +
+ + QuantEcon + +
+``` + +# Blackwell's Theorem on Comparing Experiments + +```{contents} Contents +:depth: 2 +``` + +## Overview + +This lecture explains **Blackwell's theorem** {cite}`blackwell1951,blackwell1953` on ranking statistical +experiments, following the Bayesian exposition of {cite}`kihlstrom1984`. + +Consider two random variables, $\tilde{x}_\mu$ and $\tilde{x}_\nu$, each correlated +with an unknown state $\tilde{s}$. A decision maker wants to know which observation +conveys more information about $\tilde{s}$. + +Blackwell identified a clean answer: $\tilde{x}_\mu$ is **at least as informative** as +$\tilde{x}_\nu$ if and only if any decision maker who observes $\tilde{x}_\mu$ can do +at least as well (in expected utility) as one who observes $\tilde{x}_\nu$. + +Remarkably, this economic criterion is equivalent to two purely statistical ones: + +- **Sufficiency** (Blackwell): $\tilde{x}_\mu$ is sufficient for $\tilde{x}_\nu$ — the + distribution of $\tilde{x}_\nu$ can be reproduced by passing $\tilde{x}_\mu$ through + a randomisation. +- **Uncertainty reduction** (DeGroot {cite}`degroot1962`): $\tilde{x}_\mu$ reduces + every concave measure of uncertainty at least as much as $\tilde{x}_\nu$ does. + +Kihlstrom's Bayesian restatement places the **posterior distribution** at the centre. +A more informative experiment creates a more dispersed distribution of posteriors — a +**mean-preserving spread** — which links Blackwell's ordering directly to +**second-order stochastic dominance** on the simplex of beliefs. + +We proceed in the following steps: + +1. Set up notation and define experiments as Markov matrices. +2. Define stochastic transformations (Markov kernels). +3. State the three equivalent criteria. +4. State and sketch the proof of the main theorem. +5. Develop the Bayesian interpretation via standard experiments and mean-preserving + spreads. +6. Illustrate each idea with Python simulations. + +Let's start by importing some tools. 
+ +```{code-cell} ipython3 +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.tri as mtri +from scipy.stats import dirichlet, beta as beta_dist +from scipy.optimize import minimize +from itertools import product + +np.random.seed(42) +``` + +--- + +## Experiments and Markov Matrices + +### The state space and experiments + +Let $S = \{s_1, \ldots, s_N\}$ be a finite set of possible states of the world. + +An **experiment** is described by the conditional distribution of an observed signal +$\tilde{x}$ given the state $\tilde{s}$. + +When the signal space is also finite, say $X = \{x_1, \ldots, x_M\}$, an experiment +reduces to an $N \times M$ **Markov matrix** + +$$ +\mu = [\mu_{ij}], \qquad +\mu_{ij} = \Pr(\tilde{x}_\mu = x_j \mid \tilde{s} = s_i) \geq 0, +\quad \sum_{j=1}^{M} \mu_{ij} = 1 \;\forall\, i. +$$ + +Each row $i$ gives the distribution of signals when the true state is $s_i$. + +```{code-cell} ipython3 +# Example: two states, three signals +# mu[i, j] = Pr(signal j | state i) +mu = np.array([[0.6, 0.3, 0.1], # state 1: signal is quite informative + [0.1, 0.3, 0.6]]) # state 2: opposite pattern + +nu = np.array([[0.5, 0.2, 0.3], # coarser experiment + [0.2, 0.5, 0.3]]) + +print("Experiment μ (rows sum to 1):") +print(mu) +print("\nExperiment ν:") +print(nu) +print("\nRow sums μ:", mu.sum(axis=1)) +print("Row sums ν:", nu.sum(axis=1)) +``` + +### Stochastic transformations (Markov kernels) + +A **stochastic transformation** $Q$ maps signals of one experiment to signals of +another by a further randomisation. + +In the discrete setting with $M$ input signals and $K$ output signals, $Q$ is an +$M \times K$ Markov matrix: $q_{lk} \geq 0$ and $\sum_k q_{lk} = 1$ for every row $l$. 
+ +```{admonition} Definition (Sufficiency) +:class: tip +Experiment $\mu$ is **sufficient for** $\nu$ if there exists a stochastic +transformation $Q$ (an $M \times K$ Markov matrix) such that + +$$ +\nu = \mu \, Q, +$$ + +meaning that an observer of $\tilde{x}_\mu$ can generate the distribution of +$\tilde{x}_\nu$ by passing their signal through $Q$. +``` + +The intuition: if you hold the more informative signal $\tilde{x}_\mu$, you can always +*throw away* information to produce a signal distributed like $\tilde{x}_\nu$; +the reverse is impossible. + +```{code-cell} ipython3 +def is_markov(M, tol=1e-10): + """Check whether a matrix is a valid Markov (row-stochastic) matrix.""" + return np.all(M >= -tol) and np.allclose(M.sum(axis=1), 1.0) + +def find_stochastic_transform(mu, nu, tol=1e-8): + """ + Try to find Q such that nu ≈ mu @ Q using least-squares then project. + Returns Q and the residual ||nu - mu @ Q||. + This is a simple demonstration for the discrete finite case. + """ + N, M = mu.shape + _, K = nu.shape + + # Solve nu = mu @ Q column by column using non-negative least squares + from scipy.optimize import lsq_linear + + Q = np.zeros((M, K)) + for k in range(K): + b = nu[:, k] + # Constraints: Q[:, k] >= 0, sum(Q[:, k]) = 1 + # Use lsq_linear with bounds, ignoring sum constraint for now + result = lsq_linear(mu, b, bounds=(0, np.inf)) + Q[:, k] = result.x + + # Normalise rows so Q is row-stochastic + row_sums = Q.sum(axis=1, keepdims=True) + row_sums = np.where(row_sums == 0, 1, row_sums) + Q = Q / row_sums + residual = np.linalg.norm(nu - mu @ Q) + return Q, residual + +# Build a ν that is deliberately a garbling of μ +# Q maps 3 signals -> 2 signals (merge signals 2&3) +Q_true = np.array([[1.0, 0.0], + [0.0, 1.0], + [0.0, 1.0]]) + +nu_garbled = mu @ Q_true +print("ν = μ @ Q_true:") +print(nu_garbled) +print("ν is Markov:", is_markov(nu_garbled)) + +Q_found, res = find_stochastic_transform(mu, nu_garbled) +print(f"\nRecovered Q (residual = 
{res:.2e}):")
+print(np.round(Q_found, 4))
+print("Rows of Q sum to:", Q_found.sum(axis=1).round(4))
+```
+
+---
+
+## Three Equivalent Criteria for "More Informative"
+
+### Criterion 1 — The Economic Criterion
+
+Let $A$ be a compact convex set of actions and $u: A \times S \to \mathbb{R}$ a
+bounded utility function.
+
+A decision maker observes $x \in X$, applies Bayes' rule to update beliefs about
+$\tilde{s}$, and chooses $d(x) \in A$ to maximise expected utility.
+
+Let $p = (p_1, \ldots, p_N)$ be the prior over states, and write
+
+$$
+P = \bigl\{(p_1, \ldots, p_N) : p_i \geq 0,\; \textstyle\sum_i p_i = 1\bigr\}
+$$
+
+for the probability simplex.
+
+The set of **achievable expected-utility vectors** under experiment $\mu$ is
+
+$$
+B(\mu, A) = \Bigl\{v \in \mathbb{R}^N :
+    v_i = \textstyle\int_X u(f(x), s_i)\,\mu_i(dx)
+    \text{ for some measurable } f: X \to A \Bigr\}.
+$$
+
+```{admonition} Definition (Economic Criterion — Bohnenblust–Shapley–Sherman)
+:class: tip
+$\mu$ is **at least as informative as** $\nu$ in the economic sense if
+
+$$
+B(\mu, A) \supseteq B(\nu, A)
+$$
+
+for every compact convex action set $A$ and every prior $p \in P$.
+```
+
+Equivalently, every rational decision maker weakly prefers to observe $\tilde{x}_\mu$
+over $\tilde{x}_\nu$.
+
+### Criterion 2 — The Sufficiency Criterion (Blackwell)
+
+```{admonition} Definition (Blackwell Sufficiency)
+:class: tip
+$\mu \geq \nu$ in Blackwell's sense if there exists a stochastic transformation
+$Q: X \to Y$ such that
+
+$$
+\nu_i(E) = (Q \circ \mu_i)(E)
+\quad \forall\, E \in \mathscr{G},\; i = 1, \ldots, N.
+$$
+```
+
+In matrix notation for finite experiments: $\nu = \mu \, Q$.
+
+### Criterion 3 — The Uncertainty-Function Criterion (DeGroot)
+
+{cite}`degroot1962` calls any **concave** function $U: P \to \mathbb{R}$ an
+**uncertainty function**.
+
+The prototypical example is Shannon entropy:
+
+$$
+U(p) = -\sum_{i=1}^{N} p_i \log p_i.
+$$ + +```{admonition} Definition (DeGroot Uncertainty Criterion) +:class: tip +$\mu$ **reduces expected uncertainty at least as much as** $\nu$ if, for every +concave $U: P \to \mathbb{R}$, + +$$ +\int_P U(p)\,\hat\mu^c(dp) +\;\leq\; +\int_P U(p)\,\hat\nu^c(dp), +$$ + +where $\hat\mu^c$ is the distribution of the posterior induced by experiment $\mu$ +starting from the uniform prior $c = (1/N, \ldots, 1/N)$. +``` + +Jensen's inequality guarantees that observing any signal *weakly* reduces expected +uncertainty ($\int U(p^\mu)\,d\hat\mu^c \leq U(c)$). The criterion asks whether $\mu$ +always reduces it *at least as much* as $\nu$. + +--- + +## The Main Theorem + +```{admonition} Theorem (Blackwell 1951, 1953; Bonnenblust et al. 1949; DeGroot 1962) +:class: important +The following three conditions are equivalent: + +(i) **Economic criterion:** $B(\mu, A) \supseteq B(\nu, A)$ for every compact + convex $A$ and every prior $p \in P$. + +(ii) **Sufficiency criterion:** There exists a stochastic transformation $Q$ from $X$ + to $Y$ such that $\nu = Q \circ \mu$. + +(iii) **Uncertainty criterion:** $\int_P U(p)\,\hat\mu^c(dp) \leq \int_P U(p)\,\hat\nu^c(dp)$ + for every concave $U$ and the uniform prior $c$. +``` + +The proof establishes the chain +(i) $\Leftrightarrow$ (ii) $\Leftrightarrow$ (iii). + +**Sketch (ii $\Rightarrow$ i):** If $\nu = \mu Q$, any decision rule $f$ for $\tilde{x}_\nu$ +can be replicated by first observing $\tilde{x}_\mu$, drawing $\tilde{x}_\nu \sim Q(\tilde{x}_\mu, \cdot)$, +then applying $f$. Hence $B(\nu, A) \subseteq B(\mu, A)$. + +**Sketch (i $\Rightarrow$ ii):** This uses a separating-hyperplane argument. Since +$B(\mu, A) \supseteq B(\nu, A)$ for every $A$, standard duality implies the existence +of a mean-preserving stochastic transformation $D$ mapping posteriors of $\nu$ to +posteriors of $\mu$, which constructs the required $Q$. 
+ +**Sketch (ii $\Leftrightarrow$ iii):** Given $Q$, Jensen's inequality applied to any +concave $U$ gives $\mathbb{E}[U(p^\mu)] \leq \mathbb{E}[U(p^\nu)]$. The converse — +that the condition for all concave $U$ forces the existence of $Q$ — is proved in +{cite}`blackwell1953`. + +--- + +## Kihlstrom's Bayesian Interpretation + +### Posteriors and standard experiments + +The central object in Kihlstrom's analysis is the **posterior belief vector**. + +When prior $p$ holds and experiment $\mu$ produces signal $x$, Bayes' rule gives + +$$ +p_i^\mu(x) = \Pr(\tilde{s} = s_i \mid \tilde{x}_\mu = x) += \frac{\mu_{ix} \, p_i}{\sum_j \mu_{jx}\, p_j}, \qquad i = 1, \ldots, N. +$$ + +The posterior $p^\mu(x) \in P$ is a *random variable* on the simplex. + +```{admonition} Key property (mean preservation) +:class: note +The prior $p$ is the expectation of the posterior: + +$$ +\mathbb{E}[p^\mu] = \sum_x \Pr(\tilde{x}_\mu = x)\, p^\mu(x) = p. +$$ + +This is sometimes called the **law of iterated expectations for beliefs**. +``` + +The **standard experiment** ${}^c\mu^*$ records only the posterior: it maps the +prior $c$ to the random variable $p^\mu(x) \in P$. Its distribution $\hat\mu^c$ +on $P$ satisfies $\int_P p\;\hat\mu^c(dp) = c$. + +Two experiments are **informationally equivalent** when they induce the same +distribution of posteriors. The standard experiment is the minimal sufficient +statistic for $\mu$. + +```{code-cell} ipython3 +def compute_posteriors(mu, prior): + """ + Compute the posterior distribution for each signal realisation. 
+ + Parameters + ---------- + mu : (N, M) Markov matrix — mu[i, j] = Pr(signal j | state i) + prior : (N,) prior probabilities over states + + Returns + ------- + posteriors : (M, N) array — posteriors[j] = posterior given signal j + signal_probs : (M,) marginal probability of each signal + """ + N, M = mu.shape + # Marginal probability of each signal: Pr(x_j) = sum_i Pr(x_j|s_i)*p_i + signal_probs = mu.T @ prior # shape (M,) + # Posterior: p(s_i | x_j) = mu[i,j]*p[i] / Pr(x_j) + posteriors = (mu.T * prior) / signal_probs[:, None] # shape (M, N) + return posteriors, signal_probs + + +def check_mean_preservation(posteriors, signal_probs, prior): + """Verify E[posterior] == prior.""" + expected_posterior = (posteriors * signal_probs[:, None]).sum(axis=0) + return expected_posterior, np.allclose(expected_posterior, prior) + + +# Two-state example: states s1, s2 +N = 2 +prior = np.array([0.5, 0.5]) + +# More informative experiment μ +mu_info = np.array([[0.8, 0.2], + [0.2, 0.8]]) + +# Less informative experiment ν +nu_info = np.array([[0.6, 0.4], + [0.4, 0.6]]) + +post_mu, probs_mu = compute_posteriors(mu_info, prior) +post_nu, probs_nu = compute_posteriors(nu_info, prior) + +print("=== Experiment μ (more informative) ===") +print("Signal probabilities:", probs_mu.round(3)) +print("Posteriors (row = signal, col = state):") +print(post_mu.round(3)) +mean_mu, ok_mu = check_mean_preservation(post_mu, probs_mu, prior) +print(f"E[posterior] = {mean_mu.round(4)} (equals prior: {ok_mu})") + +print("\n=== Experiment ν (less informative) ===") +print("Signal probabilities:", probs_nu.round(3)) +print("Posteriors:") +print(post_nu.round(3)) +mean_nu, ok_nu = check_mean_preservation(post_nu, probs_nu, prior) +print(f"E[posterior] = {mean_nu.round(4)} (equals prior: {ok_nu})") +``` + +### Visualising posterior distributions on the simplex + +For $N = 2$ states, the simplex $P$ is the unit interval $[0, 1]$ (the probability +of state $s_1$). 
We can directly plot the distribution of posteriors under +experiments $\mu$ and $\nu$. + +```{code-cell} ipython3 +def plot_posterior_distributions(mu_matrix, nu_matrix, prior, + labels=("μ (more informative)", + "ν (less informative)")): + """ + For a two-state experiment, plot the distribution of posteriors + (i.e., the standard experiment distribution) on [0,1]. + """ + posts_mu, probs_mu = compute_posteriors(mu_matrix, prior) + posts_nu, probs_nu = compute_posteriors(nu_matrix, prior) + + fig, axes = plt.subplots(1, 2, figsize=(11, 4), sharey=False) + prior_val = prior[0] + + for ax, posts, probs, label in zip( + axes, [posts_mu, posts_nu], [probs_mu, probs_nu], labels): + p_s1 = posts[:, 0] # posterior prob of state s1 for each signal + ax.vlines(p_s1, 0, probs, linewidth=6, color="steelblue", alpha=0.7) + ax.axvline(prior_val, color="tomato", linestyle="--", linewidth=1.5, + label=f"prior = {prior_val:.2f}") + ax.set_xlim(0, 1) + ax.set_xlabel(r"Posterior $p(s_1 \mid x)$", fontsize=12) + ax.set_ylabel("Probability mass", fontsize=12) + ax.set_title(label, fontsize=12) + ax.legend() + # Annotate mean + mean_post = (p_s1 * probs).sum() + ax.axvline(mean_post, color="green", linestyle=":", linewidth=1.5, + label=f"E[post] = {mean_post:.2f}") + ax.legend() + + fig.suptitle("Distribution of posteriors (standard experiment)\n" + "More informative → more dispersed from prior", + fontsize=12, y=1.02) + plt.tight_layout() + plt.show() + +plot_posterior_distributions(mu_info, nu_info, prior) +``` + +The more informative experiment $\mu$ pushes posteriors further from the prior in +both directions, producing a more dispersed distribution on $[0,1]$. 
+ +### Mean-preserving spreads and Blackwell's ordering + +Kihlstrom's key reformulation is: + +```{admonition} Theorem (Kihlstrom's Reformulation) +:class: important +$\mu \geq \nu$ in Blackwell's sense **if and only if** $\hat\mu^c$ is a +**mean-preserving spread** of $\hat\nu^c$; that is, + +$$ +\int_P g(p)\,\hat\mu^c(dp) \;\geq\; \int_P g(p)\,\hat\nu^c(dp) +$$ + +for every **convex** function $g: P \to \mathbb{R}$. +``` + +Equivalently, $\hat\mu^c$ **second-order stochastically dominates** $\hat\nu^c$ +(in the sense of mean-preserving spreads). + +The intuition: a better experiment resolves more uncertainty, spreading posteriors +further from the prior on average. Any convex $g$ assigns higher expected value to +a more dispersed distribution (Jensen's inequality in reverse). + +```{code-cell} ipython3 +def check_mps_convex_functions(mu_matrix, nu_matrix, prior, n_functions=200): + """ + Verify the mean-preserving spread condition for random convex functions. + + We test: E[g(p^μ)] >= E[g(p^ν)] for convex g + using a family of convex functions g(p) = (p - t)^+ = max(p-t, 0). 
+ """ + posts_mu, probs_mu = compute_posteriors(mu_matrix, prior) + posts_nu, probs_nu = compute_posteriors(nu_matrix, prior) + + p_mu = posts_mu[:, 0] # posteriors on s1 + p_nu = posts_nu[:, 0] + + thresholds = np.linspace(0, 1, n_functions) + diffs = [] + for t in thresholds: + Eg_mu = (np.maximum(p_mu - t, 0) * probs_mu).sum() + Eg_nu = (np.maximum(p_nu - t, 0) * probs_nu).sum() + diffs.append(Eg_mu - Eg_nu) + + fig, ax = plt.subplots(figsize=(8, 4)) + ax.plot(thresholds, diffs, color="steelblue", linewidth=2) + ax.axhline(0, color="tomato", linestyle="--") + ax.fill_between(thresholds, diffs, 0, + where=np.array(diffs) >= 0, + alpha=0.25, color="steelblue", + label=r"$E[g(p^\mu)] - E[g(p^\nu)] \geq 0$") + ax.set_xlabel("Threshold $t$", fontsize=12) + ax.set_ylabel(r"$E[\max(p-t,0)]$ difference", fontsize=12) + ax.set_title( + r"Mean-preserving spread check: $E[g(p^\mu)] \geq E[g(p^\nu)]$" + "\nfor convex functions $g(p) = \max(p - t, 0)$", + fontsize=11) + ax.legend(fontsize=11) + plt.tight_layout() + plt.show() + + all_non_negative = all(d >= -1e-10 for d in diffs) + print(f"μ is a mean-preserving spread of ν: {all_non_negative}") + return diffs + +_ = check_mps_convex_functions(mu_info, nu_info, prior) +``` + +--- + +## Simulating the Blackwell Ordering with Many States + +To move beyond two states we simulate richer experiments. + +We take $N = 3$ states and compare a more-informative experiment $\mu$ (whose +signal is strongly correlated with the state) against a less-informative $\nu$ +(a garbling of $\mu$). 
+ +```{code-cell} ipython3 +# Three states, three signals +N3 = 3 +prior3 = np.array([1/3, 1/3, 1/3]) + +# Informative experiment: strong diagonal +mu3 = np.array([[0.7, 0.2, 0.1], + [0.1, 0.7, 0.2], + [0.2, 0.1, 0.7]]) + +# Garbling matrix: merge signals 2 and 3 +Q3 = np.array([[0.9, 0.05, 0.05], + [0.05, 0.8, 0.15], + [0.05, 0.15, 0.8]]) # row-stochastic + +nu3 = mu3 @ Q3 + +print("μ (3×3):") +print(np.round(mu3, 2)) +print("\nQ (garbling):") +print(np.round(Q3, 2)) +print("\nν = μ @ Q:") +print(np.round(nu3, 3)) +``` + +### Plotting posterior clouds on the 2-simplex + +For three states, posteriors live in a 2-simplex (a triangle). We draw many +samples from each experiment and plot where the posteriors land. + +```{code-cell} ipython3 +def sample_posteriors(mu_matrix, prior, n_draws=3000): + """ + Simulate n_draws observations from the experiment and compute + the resulting posterior beliefs. + Returns array of shape (n_draws, N). + """ + N, M = mu_matrix.shape + # Draw a state + states = np.random.choice(N, size=n_draws, p=prior) + # Draw a signal conditioned on the state + signals = np.array([np.random.choice(M, p=mu_matrix[s]) for s in states]) + # Compute posterior + posteriors, signal_probs = compute_posteriors(mu_matrix, prior) + return posteriors[signals] # shape (n_draws, N) + + +def simplex_to_cart(pts): + """Convert 3-simplex barycentric coordinates to 2-D Cartesian.""" + corners = np.array([[0.0, 0.0], + [1.0, 0.0], + [0.5, np.sqrt(3)/2]]) + return pts @ corners + + +def plot_simplex_posteriors(mu_matrix, nu_matrix, prior3, n_draws=3000): + posts_mu = sample_posteriors(mu_matrix, prior3, n_draws) + posts_nu = sample_posteriors(nu_matrix, prior3, n_draws) + + cart_mu = simplex_to_cart(posts_mu) + cart_nu = simplex_to_cart(posts_nu) + prior_cart = simplex_to_cart(prior3[None, :])[0] + + corners = np.array([[0.0, 0.0], + [1.0, 0.0], + [0.5, np.sqrt(3)/2]]) + triangle = plt.Polygon(corners, fill=False, edgecolor="black", linewidth=1.5) + + fig, axes = 
plt.subplots(1, 2, figsize=(12, 5)) + titles = ["μ (more informative)", "ν (less informative / garbled)"] + data = [(cart_mu, "steelblue"), (cart_nu, "darkorange")] + labels = ["$s_1$", "$s_2$", "$s_3$"] + offsets = [(-0.07, -0.05), (1.02, -0.05), (0.48, np.sqrt(3)/2 + 0.03)] + + for ax, (cart, c), title in zip(axes, data, titles): + tri = plt.Polygon(corners, fill=False, edgecolor="black", linewidth=1.5) + ax.add_patch(tri) + ax.scatter(cart[:, 0], cart[:, 1], s=4, alpha=0.25, color=c) + ax.scatter(*prior_cart, s=120, color="red", zorder=5, + label="prior", marker="*") + for i, (lbl, off) in enumerate(zip(labels, offsets)): + ax.text(corners[i][0] + off[0], corners[i][1] + off[1], + lbl, fontsize=13) + ax.set_xlim(-0.15, 1.15) + ax.set_ylim(-0.1, np.sqrt(3)/2 + 0.1) + ax.set_aspect("equal") + ax.axis("off") + ax.set_title(title, fontsize=12) + ax.legend(fontsize=11, loc="upper right") + + fig.suptitle("Posterior clouds on the 2-simplex\n" + "More informative experiment → posteriors spread further from prior", + fontsize=12) + plt.tight_layout() + plt.show() + +plot_simplex_posteriors(mu3, nu3, prior3) +``` + +The posteriors under $\mu$ cluster near the vertices (near-certain beliefs), while +those under the garbled $\nu$ cluster closer to the centre (the prior). + +--- + +## The DeGroot Uncertainty Function + +### Concave uncertainty functions and the value of information + +{cite}`degroot1962` formalises the value of information through an **uncertainty function** +$U: P \to \mathbb{R}$ that must be: + +- **Concave**: by Jensen, observing any signal weakly reduces expected uncertainty. +- **Symmetric**: depends on the components of $p$, not their labelling. +- **Normalised**: maximised at $p = (1/N, \ldots, 1/N)$ and minimised at vertices. + +The **value of experiment $\mu$ given prior $p$** is + +$$ +I(\tilde{x}_\mu;\, \tilde{s};\, U) += U(p) - \mathbb{E}[U(p^\mu)], +$$ + +the expected reduction in uncertainty. 
A key result is that $\mu \geq \nu$ **if and +only if** $I(\tilde{x}_\mu; \tilde{s}; U) \geq I(\tilde{x}_\nu; \tilde{s}; U)$ for +**every** concave $U$. + +### Shannon entropy as a special case + +The canonical uncertainty function is Shannon entropy + +$$ +U_H(p) = -\sum_{i=1}^{N} p_i \log p_i. +$$ + +Under the uniform prior $c = (1/N, \ldots, 1/N)$, DeGroot's value formula becomes + +$$ +I(\tilde{x}_\mu, c;\, U_H) += \log N - H(\tilde{s} \mid \tilde{x}_\mu), +$$ + +where $H(\tilde{s} \mid \tilde{x}_\mu)$ is the conditional entropy of the state given +the signal — exactly the **mutual information** between $\tilde{x}_\mu$ and $\tilde{s}$. + +```{note} +The Blackwell ordering implies the entropy-based inequality, but the *converse fails*: +entropy alone does not pin down the full Blackwell ordering — you need the inequality +for **every** concave $U$. +``` + +```{code-cell} ipython3 +def entropy(p, eps=1e-12): + """Shannon entropy of a probability vector.""" + p = np.asarray(p, dtype=float) + p = np.clip(p, eps, 1.0) + return -np.sum(p * np.log(p)) + + +def degroot_value(mu_matrix, prior, U_func): + """ + Compute DeGroot's value of information I = U(prior) - E[U(posterior)]. 
+ """ + posts, probs = compute_posteriors(mu_matrix, prior) + prior_uncertainty = U_func(prior) + expected_post_uncertainty = sum( + probs[j] * U_func(posts[j]) for j in range(len(probs))) + return prior_uncertainty - expected_post_uncertainty + + +# --- Several concave uncertainty functions --- +def gini_impurity(p): + """Gini impurity: 1 - sum(p_i^2).""" + return 1.0 - np.sum(np.asarray(p)**2) + +def tsallis_entropy(p, q=2): + """Tsallis entropy of order q (concave for q>1).""" + p = np.clip(p, 1e-12, 1.0) + return (1 - np.sum(p**q)) / (q - 1) + +def min_entropy(p): + """Min-entropy: -log(max(p)).""" + return -np.log(np.max(np.clip(p, 1e-12, 1.0))) + +uncertainty_functions = { + "Shannon entropy": entropy, + "Gini impurity": gini_impurity, + "Tsallis (q=2)": tsallis_entropy, + "Min-entropy": min_entropy, +} + +print(f"{'Uncertainty function':<22} {'I(μ)':<10} {'I(ν)':<10} {'I(μ)≥I(ν)?'}") +print("-" * 58) +for name, U in uncertainty_functions.items(): + I_mu = degroot_value(mu_info, prior, U) + I_nu = degroot_value(nu_info, prior, U) + print(f"{name:<22} {I_mu:<10.4f} {I_nu:<10.4f} {I_mu >= I_nu - 1e-10}") +``` + +As predicted by the theorem, $I(\mu) \geq I(\nu)$ for every concave uncertainty +function once we know $\mu \geq \nu$ in the Blackwell sense. + +### Value of information as a function of experiment quality + +We now parameterise a continuum of experiments that interpolate between the +completely uninformative experiment (signal is independent of the state) and the +perfectly informative one (signal perfectly reveals the state). + +For $N = 2$ states, a natural family is + +$$ +\mu(\theta) = (1 - \theta) \cdot \tfrac{1}{2}\mathbf{1}\mathbf{1}^\top + + \theta \cdot I_2, +\quad \theta \in [0, 1], +$$ + +where the first term is the completely mixed (uninformative) matrix and $I_2$ is the +identity (perfectly informative). 
+ +```{code-cell} ipython3 +def make_experiment(theta, N=2): + """ + Parameterised experiment: theta=0 is uninformative, theta=1 is perfect. + mu(theta) = (1-theta)*(1/N)*ones + theta*I + """ + return (1 - theta) * np.ones((N, N)) / N + theta * np.eye(N) + + +thetas = np.linspace(0, 1, 100) +prior2 = np.array([0.5, 0.5]) + +fig, ax = plt.subplots(figsize=(9, 4)) +for name, U in uncertainty_functions.items(): + values = [degroot_value(make_experiment(t), prior2, U) for t in thetas] + # Normalise to [0,1] for comparability across functions + vmin, vmax = values[0], values[-1] + normed = (np.array(values) - vmin) / (vmax - vmin + 1e-15) + ax.plot(thetas, normed, label=name, linewidth=2) + +ax.set_xlabel(r"Experiment quality $\theta$ (0 = uninformative, 1 = perfect)", + fontsize=11) +ax.set_ylabel("Normalised value of information $I(\\mu(\\theta))$", fontsize=11) +ax.set_title("Value of information rises monotonically with experiment quality\n" + "for every concave uncertainty function", fontsize=11) +ax.legend(fontsize=10) +plt.tight_layout() +plt.show() +``` + +Every concave uncertainty function assigns weakly higher value to a more informative +experiment — a graphical illustration of the equivalence (i) $\Leftrightarrow$ (iii). + +--- + +## Connection to Second-Order Stochastic Dominance + +The uncertainty-function representation makes the connection to **second-order +stochastic dominance (SOSD)** explicit. + +Because $U$ is concave, $-U$ is convex, and the condition + +$$ +\mathbb{E}[U(p^\mu)] \leq \mathbb{E}[U(p^\nu)] \quad \text{for all concave } U +$$ + +is precisely the statement that $\hat\mu^c$ dominates $\hat\nu^c$ in the +**mean-preserving spread** sense on $P$. + +The Blackwell ordering on *experiments* is therefore isomorphic to the SOSD ordering +on *distributions of posteriors*. + +```{code-cell} ipython3 +def lorenz_curve_1d(weights, values): + """ + Compute the Lorenz-like CDF used for SOSD comparisons. 
+ Returns (sorted values, cumulative probability mass). + """ + idx = np.argsort(values) + sorted_vals = values[idx] + sorted_wts = weights[idx] + cum_mass = np.cumsum(sorted_wts) + return sorted_vals, cum_mass + + +def plot_sosd_posteriors(mu_matrix, nu_matrix, prior, title=""): + """ + Plot the CDFs of the posterior-on-s1 distributions under mu and nu, + and verify SOSD (mu dominates nu in the MPS sense). + """ + posts_mu, probs_mu = compute_posteriors(mu_matrix, prior) + posts_nu, probs_nu = compute_posteriors(nu_matrix, prior) + + p_mu = posts_mu[:, 0] + p_nu = posts_nu[:, 0] + + sv_mu, cm_mu = lorenz_curve_1d(probs_mu, p_mu) + sv_nu, cm_nu = lorenz_curve_1d(probs_nu, p_nu) + + fig, axes = plt.subplots(1, 2, figsize=(11, 4)) + + # Left: CDFs + ax = axes[0] + for sv, cm, lbl, c in [(sv_mu, cm_mu, "μ", "steelblue"), + (sv_nu, cm_nu, "ν", "darkorange")]: + xs = np.concatenate([[0], sv, [1]]) + ys = np.concatenate([[0], cm, [1]]) + ax.step(xs, ys, where="post", label=lbl, color=c, linewidth=2) + ax.set_xlabel(r"Posterior $p(s_1 \mid x)$", fontsize=12) + ax.set_ylabel("Cumulative probability", fontsize=12) + ax.set_title("CDFs of posterior distributions", fontsize=11) + ax.legend(fontsize=11) + ax.axvline(prior[0], linestyle="--", color="gray", alpha=0.6, + label="prior") + + # Right: integrated CDFs (SOSD criterion: F_nu integrates >= F_mu) + ax2 = axes[1] + grid = np.linspace(0, 1, 200) + + def integrated_cdf(sorted_vals, cum_mass, grid): + # CDF at each grid point + cdf = np.array([cum_mass[sorted_vals <= t].max() + if np.any(sorted_vals <= t) else 0.0 + for t in grid]) + return np.cumsum(cdf) * (grid[1] - grid[0]) + + int_mu = integrated_cdf(sv_mu, cm_mu, grid) + int_nu = integrated_cdf(sv_nu, cm_nu, grid) + + ax2.plot(grid, int_mu, label="∫F_μ", color="steelblue", linewidth=2) + ax2.plot(grid, int_nu, label="∫F_ν", color="darkorange", linewidth=2) + ax2.fill_between(grid, int_mu, int_nu, + where=int_nu >= int_mu, + alpha=0.2, color="darkorange", + label="∫F_ν ≥ 
∫F_μ (μ MPS-dominates ν)") + ax2.set_xlabel(r"$t$", fontsize=12) + ax2.set_ylabel("Integrated CDF", fontsize=12) + ax2.set_title("SOSD: integrated CDFs\n(μ dominates ν iff ∫F_ν ≥ ∫F_μ everywhere)", + fontsize=11) + ax2.legend(fontsize=10) + + fig.suptitle(title or "Second-order stochastic dominance of posterior distributions", + fontsize=11, y=1.01) + plt.tight_layout() + plt.show() + +plot_sosd_posteriors(mu_info, nu_info, prior, + title="μ is a mean-preserving spread of ν:\n" + "μ second-order stochastically dominates ν") +``` + +--- + +## The Stochastic Transformation as a Mean-Preserving Randomisation + +Kihlstrom proves that (i) $\Rightarrow$ (ii) by explicit construction. + +Given that $\mu$ achieves at least the value of $\nu$ for every user, he constructs +a stochastic transformation $D(p^0, \cdot)$ on $P$ that is **mean-preserving**: + +$$ +\int_P p\; D(p^0, dp) = p^0. +$$ + +Setting $Q = D$ provides the Markov kernel witnessing Blackwell sufficiency. + +The mean-preservation condition says: passing $\tilde{x}_\mu$ through $Q$ to +produce a synthetic $\tilde{x}_\nu$ cannot add information — it only destroys it. + +```{code-cell} ipython3 +def verify_garbling_mean_preservation(mu_matrix, Q_matrix, prior): + """ + Verify that the garbling Q is mean-preserving: + E[posterior under ν] = E[posterior under μ]. + Both should equal the prior. + """ + nu_matrix = mu_matrix @ Q_matrix + posts_mu, probs_mu = compute_posteriors(mu_matrix, prior) + posts_nu, probs_nu = compute_posteriors(nu_matrix, prior) + + mean_mu = (posts_mu * probs_mu[:, None]).sum(axis=0) + mean_nu = (posts_nu * probs_nu[:, None]).sum(axis=0) + + print(f"Prior: {prior.round(4)}") + print(f"E[p^μ]: {mean_mu.round(4)}") + print(f"E[p^ν = p^(μQ)]: {mean_nu.round(4)}") + print(f"Both equal prior? 
mu: {np.allclose(mean_mu, prior)}, " + f"nu: {np.allclose(mean_nu, prior)}") + + +# Q_true maps 2 signals -> 2 signals (a softening garbling) +Q_soft = np.array([[0.7, 0.3], + [0.3, 0.7]]) + +verify_garbling_mean_preservation(mu_info, Q_soft, prior) +``` + +--- + +## Comparing Experiments: A Systematic Example + +We now study a grid of experiments indexed by their quality parameter $\theta$ +and verify that the Blackwell ordering is faithfully reflected in: + +1. The spread of posteriors (mean-preserving spread check). +2. The value of information under every concave $U$. +3. The SOSD ranking of posterior distributions. + +```{code-cell} ipython3 +thetas_grid = [0.1, 0.4, 0.7, 1.0] +prior2 = np.array([0.5, 0.5]) + +fig, axes = plt.subplots(2, 2, figsize=(11, 8)) +axes = axes.flat + +for ax, t in zip(axes, thetas_grid): + mu_t = make_experiment(t) + posts, probs = compute_posteriors(mu_t, prior2) + p_s1 = posts[:, 0] + ax.vlines(p_s1, 0, probs, linewidth=8, color="steelblue", alpha=0.7) + ax.axvline(prior2[0], color="tomato", linestyle="--", linewidth=1.5, + label=f"prior = {prior2[0]:.2f}") + I_H = degroot_value(mu_t, prior2, entropy) + I_G = degroot_value(mu_t, prior2, gini_impurity) + ax.set_title(fr"$\theta = {t}$ | $I_H = {I_H:.3f}$ | $I_G = {I_G:.3f}$", + fontsize=11) + ax.set_xlim(0, 1) + ax.set_xlabel(r"Posterior $p(s_1 \mid x)$", fontsize=11) + ax.set_ylabel("Probability mass", fontsize=11) + ax.legend(fontsize=10) + +fig.suptitle("Distribution of posteriors for experiments of increasing quality\n" + r"$\theta = 0$: uninformative; $\theta = 1$: perfect", + fontsize=12) +plt.tight_layout() +plt.show() +``` + +As $\theta$ rises from 0 (unifomative) to 1 (perfect), posteriors migrate toward the +vertices $\{0, 1\}$, the value of information rises under every $U$, and the +distributions form a chain under the SOSD order. 
+ +--- + +## Application 1 — Product Quality Information (Kihlstrom 1974) + +{cite}`kihlstrom1974a` applies Blackwell's theorem to consumer demand for information +about product quality. + +- The unknown state $\tilde{s}$ is a product parameter $\theta$. +- A consumer can purchase $\lambda$ units of information at cost $c(\lambda)$. +- As $\lambda$ rises, the experiment becomes more informative in the Blackwell sense. + +The Blackwell ordering certifies that "more information is always better" for every +expected-utility maximiser when information is free. + +The consumer's demand for information equates the *marginal value of the standard +experiment* to its *marginal cost*. + +```{code-cell} ipython3 +def consumer_value(theta, prior2, U=entropy, cost_per_unit=0.5): + """ + Value of purchasing experiment quality theta. + Returns gross value I(theta) and net value I(theta) - cost. + """ + mu_t = make_experiment(theta) + gross = degroot_value(mu_t, prior2, U) + net = gross - cost_per_unit * theta + return gross, net + + +thetas_fine = np.linspace(0, 1, 200) +gross_vals = [] +net_vals = [] +marginal_vals = [] + +for t in thetas_fine: + g, n = consumer_value(t, prior2, entropy, cost_per_unit=0.4) + gross_vals.append(g) + net_vals.append(n) + +# Marginal value (numerical derivative) +marginal_vals = np.gradient(gross_vals, thetas_fine) + +fig, axes = plt.subplots(1, 2, figsize=(12, 4)) + +ax = axes[0] +ax.plot(thetas_fine, gross_vals, label="Gross value $I(\\theta)$", + color="steelblue", linewidth=2) +ax.plot(thetas_fine, [0.4 * t for t in thetas_fine], + label="Cost $c \\cdot \\theta$", color="tomato", + linestyle="--", linewidth=2) +ax.plot(thetas_fine, net_vals, label="Net value", color="green", linewidth=2) +ax.set_xlabel(r"Experiment quality $\theta$", fontsize=11) +ax.set_ylabel("Value (Shannon entropy units)", fontsize=11) +ax.set_title("Gross value, cost, and net value of information", fontsize=11) +ax.legend(fontsize=10) + +ax2 = axes[1] 
+ax2.plot(thetas_fine, marginal_vals, label="Marginal value $I'(\\theta)$", + color="steelblue", linewidth=2) +ax2.axhline(0.4, color="tomato", linestyle="--", linewidth=2, + label="Marginal cost $c = 0.4$") +opt_idx = np.argmin(np.abs(np.array(marginal_vals) - 0.4)) +ax2.axvline(thetas_fine[opt_idx], color="green", linestyle=":", + label=fr"Optimal $\theta^* \approx {thetas_fine[opt_idx]:.2f}$") +ax2.set_xlabel(r"Experiment quality $\theta$", fontsize=11) +ax2.set_ylabel("Marginal value / Marginal cost", fontsize=11) +ax2.set_title("Optimal demand for information:\n" + "MV = MC at optimal $\\theta^*$", fontsize=11) +ax2.legend(fontsize=10) + +plt.tight_layout() +plt.show() +``` + +The optimal demand for information $\theta^*$ occurs where marginal value equals +marginal cost. Both axes shift as the cost $c$ changes, demonstrating comparative +statics. + +--- + +## Application 2 — Sequential Experimental Design (DeGroot 1962) + +{cite}`degroot1962` applies the uncertainty-function framework to **sequential +experimental design**. + +Each period a statistician observes one draw and updates their posterior. The +question is which sequence of experiments minimises cumulative expected uncertainty. + +The Blackwell theorem implies that if one experiment is more informative than another +at every stage, the optimal sequential strategy simply uses the better experiment at +every period. + +We simulate sequential belief updating for experiments of different quality. + +```{code-cell} ipython3 +def sequential_update(mu_matrix, prior, T=20, seed=0): + """ + Simulate T sequential belief updates under experiment mu. + Returns the path of posterior beliefs (T+1, N). 
+ """ + rng = np.random.default_rng(seed) + N, M = mu_matrix.shape + beliefs = np.zeros((T + 1, N)) + beliefs[0] = prior.copy() + + true_state = rng.choice(N, p=prior) + + for t in range(T): + p = beliefs[t] + # Draw a signal from the true state + signal = rng.choice(M, p=mu_matrix[true_state]) + # Bayes update + unnorm = mu_matrix[:, signal] * p + beliefs[t + 1] = unnorm / unnorm.sum() + + return beliefs, true_state + + +def plot_sequential_beliefs(thetas_compare, prior2, T=25): + fig, axes = plt.subplots(1, len(thetas_compare), figsize=(14, 4), sharey=True) + + for ax, theta in zip(axes, thetas_compare): + mu_t = make_experiment(theta, N=2) + for seed in range(15): + beliefs, ts = sequential_update(mu_t, prior2, T=T, seed=seed) + c = "steelblue" if ts == 0 else "darkorange" + ax.plot(beliefs[:, 0], alpha=0.4, color=c, linewidth=1.2) + ax.axhline(prior2[0], linestyle="--", color="gray", linewidth=1, + label="prior") + ax.axhline(1.0, linestyle=":", color="steelblue", linewidth=0.8) + ax.axhline(0.0, linestyle=":", color="darkorange", linewidth=0.8) + ax.set_title(fr"$\theta = {theta}$", fontsize=12) + ax.set_xlabel("Period $t$", fontsize=11) + if theta == thetas_compare[0]: + ax.set_ylabel(r"Posterior $p(s_1 \mid x^t)$", fontsize=11) + ax.set_ylim(-0.05, 1.05) + ax.legend(fontsize=9) + + fig.suptitle("Sequential belief paths under experiments of increasing quality\n" + "Blue = true state $s_1$; Orange = true state $s_2$", + fontsize=11) + plt.tight_layout() + plt.show() + +plot_sequential_beliefs([0.2, 0.5, 0.9], prior2, T=30) +``` + +More informative experiments (larger $\theta$) cause beliefs to converge faster to the +truth. Under the uniform prior and perfectly symmetric experiments, belief paths are +martingales — the law of iterated expectations for beliefs. 
```{code-cell} ipython3
# Verify the martingale property: E[p_{t+1} | x^t] = p_t
def check_martingale(mu_matrix, prior, T=15, n_paths=2000, seed=0):
    """
    Simulate many belief paths and check E[p_{t+1}] ≈ E[p_t].
    Under the true prior, belief sequences are martingales.

    Parameters
    ----------
    mu_matrix : (N, M) row-stochastic array
        mu_matrix[i, j] = Pr(signal j | state i).
    prior : (N,) array
        Prior over states; each path draws its true state from it.
    T : int
        Number of sequential Bayes updates per path.
    n_paths : int
        Number of simulated paths averaged over.
    seed : int
        Seed for the random generator, so results are reproducible.

    Notes
    -----
    Paths conditioned on one realised state are *not* martingales;
    only the average over states drawn from the prior stays at the
    prior, which is what the plot and printout verify.
    """
    rng = np.random.default_rng(seed)
    N, M = mu_matrix.shape
    # all_paths[k, t] holds the full posterior vector of path k at time t
    all_paths = np.zeros((n_paths, T + 1, N))

    for k in range(n_paths):
        true_state = rng.choice(N, p=prior)
        p = prior.copy()
        all_paths[k, 0] = p
        for t in range(T):
            # Draw a signal from the true state's row, then apply Bayes' rule
            signal = rng.choice(M, p=mu_matrix[true_state])
            unnorm = mu_matrix[:, signal] * p
            p = unnorm / unnorm.sum()
            all_paths[k, t + 1] = p

    mean_path = all_paths[:, :, 0].mean(axis=0)  # E[p(s1)] over paths

    fig, ax = plt.subplots(figsize=(8, 4))
    ax.plot(mean_path, color="steelblue", linewidth=2,
            label=r"$\bar p_t(s_1)$ (mean over paths)")
    ax.axhline(prior[0], linestyle="--", color="tomato", linewidth=1.5,
               label=fr"Prior $p_0 = {prior[0]:.2f}$")
    ax.set_xlabel("Period $t$", fontsize=12)
    ax.set_ylabel(r"$E[p_t(s_1)]$", fontsize=12)
    ax.set_title(r"Belief martingale: $E[p_t(s_1)]$ stays at the prior"
                 "\n(law of iterated expectations for beliefs)", fontsize=11)
    ax.legend(fontsize=11)
    ax.set_ylim(0, 1)
    plt.tight_layout()
    plt.show()

    # Report the time-and-path average; it should be close to prior[0]
    print(f"Prior = {prior[0]:.4f}")
    print(f"Mean belief (averaged over {n_paths} paths and time): "
          f"{mean_path.mean():.4f}")

check_martingale(mu_info, prior, T=20, n_paths=5000)
```

The mean posterior tracks the prior throughout — reflecting the law of iterated
expectations applied to beliefs. 
+ +--- + +## Summary + +Blackwell's theorem identifies a **partial order** on statistical experiments with +three equivalent characterisations: + +| Criterion | Condition | +|-----------|-----------| +| **Economic** | Every decision maker prefers $\mu$ to $\nu$: $B(\mu,A) \supseteq B(\nu,A)$ | +| **Sufficiency** | $\nu$ is a garbling of $\mu$: $\nu = \mu Q$ for some Markov $Q$ | +| **Uncertainty** | $\mu$ reduces every concave $U$ more: $E[U(p^\mu)] \leq E[U(p^\nu)]$ | + +Kihlstrom's Bayesian exposition clarifies the theorem's geometry by placing the +**posterior distribution** at the centre: + +- A more informative experiment creates a **more dispersed** distribution of + posteriors — a mean-preserving spread of the posterior distribution induced by + the less informative experiment. +- This links the Blackwell order to **second-order stochastic dominance** on the + probability simplex $P$. +- The uncertainty-function criterion is then transparent: because $U$ is concave, + more dispersed posteriors (mean-preserving spread) correspond to higher expected + $U$ — equivalently, lower expected uncertainty. + +DeGroot's contribution is to extend the criterion from specific utility functions +to the *entire class* of concave uncertainty functions, confirming the full +generality of Blackwell's result. 
+ + From 896a390908876facec982dc998779d25c94c653e Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Tue, 31 Mar 2026 21:42:57 +1100 Subject: [PATCH 02/20] updates --- lectures/_static/quant-econ.bib | 2 +- lectures/blackwell_kihlstrom.md | 1015 +++++++++++++++---------------- 2 files changed, 486 insertions(+), 531 deletions(-) diff --git a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index d674fe311..b62cf6ecb 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -71,7 +71,7 @@ @article{kihlstrom1974a number = {4}, pages = {413--439}, year = {1974}, - doi = {10.1016/0022-0531(74)90003-0} + doi = {10.1016/0022-0531(74)90019-2} } @inproceedings{hansen2004certainty, diff --git a/lectures/blackwell_kihlstrom.md b/lectures/blackwell_kihlstrom.md index cdb828854..818882885 100644 --- a/lectures/blackwell_kihlstrom.md +++ b/lectures/blackwell_kihlstrom.md @@ -28,56 +28,45 @@ kernelspec: ## Overview -This lecture explains **Blackwell's theorem** {cite}`blackwell1951,blackwell1953` on ranking statistical -experiments, following the Bayesian exposition of {cite}`kihlstrom1984`. +This lecture studies *Blackwell's theorem* {cite}`blackwell1951,blackwell1953` on ranking statistical experiments, following the Bayesian exposition in {cite}`kihlstrom1984`. -Consider two random variables, $\tilde{x}_\mu$ and $\tilde{x}_\nu$, each correlated -with an unknown state $\tilde{s}$. A decision maker wants to know which observation -conveys more information about $\tilde{s}$. +Suppose that two signals, $\tilde{x}_\mu$ and $\tilde{x}_\nu$, are both informative about an unknown state $\tilde{s}$. -Blackwell identified a clean answer: $\tilde{x}_\mu$ is **at least as informative** as -$\tilde{x}_\nu$ if and only if any decision maker who observes $\tilde{x}_\mu$ can do -at least as well (in expected utility) as one who observes $\tilde{x}_\nu$. +Blackwell's question is which signal is more informative. 
-Remarkably, this economic criterion is equivalent to two purely statistical ones: +Experiment $\mu$ is **at least as informative as** experiment $\nu$ if every Bayesian decision maker can attain weakly higher expected utility with $\mu$ than with $\nu$. -- **Sufficiency** (Blackwell): $\tilde{x}_\mu$ is sufficient for $\tilde{x}_\nu$ — the - distribution of $\tilde{x}_\nu$ can be reproduced by passing $\tilde{x}_\mu$ through - a randomisation. -- **Uncertainty reduction** (DeGroot {cite}`degroot1962`): $\tilde{x}_\mu$ reduces - every concave measure of uncertainty at least as much as $\tilde{x}_\nu$ does. +This economic criterion is equivalent to two statistical criteria: -Kihlstrom's Bayesian restatement places the **posterior distribution** at the centre. -A more informative experiment creates a more dispersed distribution of posteriors — a -**mean-preserving spread** — which links Blackwell's ordering directly to -**second-order stochastic dominance** on the simplex of beliefs. +- *Sufficiency* (Blackwell): $\tilde{x}_\nu$ can be generated from $\tilde{x}_\mu$ by an additional randomization. +- *Uncertainty reduction* (DeGroot {cite}`degroot1962`): $\tilde{x}_\mu$ lowers expected uncertainty at least as much as $\tilde{x}_\nu$ for every concave uncertainty function. -We proceed in the following steps: +Kihlstrom's reformulation places the *posterior distribution* at the center. + +More informative experiments generate posterior distributions that are more dispersed in convex order. + +In the two-state case, this becomes the familiar mean-preserving-spread comparison on $[0, 1]$, which can be checked with the integrated-CDF test used for second-order stochastic dominance. + +The lecture proceeds as follows: 1. Set up notation and define experiments as Markov matrices. 2. Define stochastic transformations (Markov kernels). 3. State the three equivalent criteria. 4. State and sketch the proof of the main theorem. -5. 
Develop the Bayesian interpretation via standard experiments and mean-preserving - spreads. +5. Develop the Bayesian interpretation via standard experiments and mean-preserving spreads. 6. Illustrate each idea with Python simulations. -Let's start by importing some tools. +We begin with some imports. ```{code-cell} ipython3 import numpy as np import matplotlib.pyplot as plt -import matplotlib.tri as mtri -from scipy.stats import dirichlet, beta as beta_dist from scipy.optimize import minimize -from itertools import product np.random.seed(42) ``` ---- - -## Experiments and Markov Matrices +## Experiments and Markov matrices ### The state space and experiments @@ -98,26 +87,23 @@ $$ Each row $i$ gives the distribution of signals when the true state is $s_i$. ```{code-cell} ipython3 -# Example: two states, three signals -# mu[i, j] = Pr(signal j | state i) -mu = np.array([[0.6, 0.3, 0.1], # state 1: signal is quite informative - [0.1, 0.3, 0.6]]) # state 2: opposite pattern +μ = np.array([[0.6, 0.3, 0.1], + [0.1, 0.3, 0.6]]) -nu = np.array([[0.5, 0.2, 0.3], # coarser experiment - [0.2, 0.5, 0.3]]) +ν = np.array([[0.5, 0.2, 0.3], + [0.2, 0.5, 0.3]]) print("Experiment μ (rows sum to 1):") -print(mu) +print(μ) print("\nExperiment ν:") -print(nu) -print("\nRow sums μ:", mu.sum(axis=1)) -print("Row sums ν:", nu.sum(axis=1)) +print(ν) +print("\nRow sums μ:", μ.sum(axis=1)) +print("Row sums ν:", ν.sum(axis=1)) ``` ### Stochastic transformations (Markov kernels) -A **stochastic transformation** $Q$ maps signals of one experiment to signals of -another by a further randomisation. +A **stochastic transformation** $Q$ maps signals of one experiment to signals of another by further randomization. In the discrete setting with $M$ input signals and $K$ output signals, $Q$ is an $M \times K$ Markov matrix: $q_{lk} \geq 0$ and $\sum_k q_{lk} = 1$ for every row $l$. 
@@ -135,70 +121,74 @@ meaning that an observer of $\tilde{x}_\mu$ can generate the distribution of $\tilde{x}_\nu$ by passing their signal through $Q$. ``` -The intuition: if you hold the more informative signal $\tilde{x}_\mu$, you can always -*throw away* information to produce a signal distributed like $\tilde{x}_\nu$; -the reverse is impossible. +If you observe the more informative signal $\tilde{x}_\mu$, then you can always *throw away* information to reproduce a less informative signal. + +The reverse is not possible: a less informative signal cannot be enriched to recover what was lost. ```{code-cell} ipython3 def is_markov(M, tol=1e-10): """Check whether a matrix is a valid Markov (row-stochastic) matrix.""" return np.all(M >= -tol) and np.allclose(M.sum(axis=1), 1.0) -def find_stochastic_transform(mu, nu, tol=1e-8): + +def find_stochastic_transform(μ, ν, tol=1e-8): """ - Try to find Q such that nu ≈ mu @ Q using least-squares then project. - Returns Q and the residual ||nu - mu @ Q||. - This is a simple demonstration for the discrete finite case. + Find a row-stochastic matrix Q that minimizes ||ν - μ @ Q||. 
""" - N, M = mu.shape - _, K = nu.shape - - # Solve nu = mu @ Q column by column using non-negative least squares - from scipy.optimize import lsq_linear - - Q = np.zeros((M, K)) - for k in range(K): - b = nu[:, k] - # Constraints: Q[:, k] >= 0, sum(Q[:, k]) = 1 - # Use lsq_linear with bounds, ignoring sum constraint for now - result = lsq_linear(mu, b, bounds=(0, np.inf)) - Q[:, k] = result.x - - # Normalise rows so Q is row-stochastic - row_sums = Q.sum(axis=1, keepdims=True) - row_sums = np.where(row_sums == 0, 1, row_sums) - Q = Q / row_sums - residual = np.linalg.norm(nu - mu @ Q) - return Q, residual - -# Build a ν that is deliberately a garbling of μ -# Q maps 3 signals -> 2 signals (merge signals 2&3) + _, M = μ.shape + _, K = ν.shape + + def unpack(q_flat): + return q_flat.reshape(M, K) + + def objective(q_flat): + Q = unpack(q_flat) + return np.linalg.norm(ν - μ @ Q)**2 + + constraints = [ + {"type": "eq", "fun": lambda q_flat, + row=i: unpack(q_flat)[row].sum() - 1.0} + for i in range(M) + ] + bounds = [(0.0, 1.0)] * (M * K) + Q0 = np.full((M, K), 1 / K).ravel() + + result = minimize( + objective, + Q0, + method="SLSQP", + bounds=bounds, + constraints=constraints, + options={"ftol": tol, "maxiter": 1_000}, + ) + + Q = unpack(result.x) + residual = np.linalg.norm(ν - μ @ Q) + return Q, residual, result.success + Q_true = np.array([[1.0, 0.0], [0.0, 1.0], [0.0, 1.0]]) -nu_garbled = mu @ Q_true +ν_garbled = μ @ Q_true print("ν = μ @ Q_true:") -print(nu_garbled) -print("ν is Markov:", is_markov(nu_garbled)) +print(ν_garbled) +print("ν is Markov:", is_markov(ν_garbled)) -Q_found, res = find_stochastic_transform(mu, nu_garbled) -print(f"\nRecovered Q (residual = {res:.2e}):") +Q_found, res, success = find_stochastic_transform(μ, ν_garbled) +print(f"\nRecovered Q (success = {success}, residual = {res:.2e}):") print(np.round(Q_found, 4)) print("Rows of Q sum to:", Q_found.sum(axis=1).round(4)) ``` ---- - -## Three Equivalent Criteria for "More Informative" +## 
Three equivalent criteria -### Criterion 1 — The Economic Criterion +### Criterion 1: the economic criterion Let $A$ be a compact convex set of actions and $u: A \times S \to \mathbb{R}$ a bounded utility function. -A decision maker observes $x \in X$, applies Bayes' rule to update beliefs about -$\tilde{s}$, and chooses $d(x) \in A$ to maximise expected utility. +A decision maker observes $x \in X$, updates beliefs about $\tilde{s}$ by Bayes' rule, and chooses $d(x) \in A$ to maximize expected utility. Let $p = (p_1, \ldots, p_N)$ be the prior over states, and write @@ -208,34 +198,40 @@ $$ for the probability simplex. -The set of **achievable expected-utility vectors** under experiment $\mu$ is +For fixed $A$ and $u$, the set of **achievable expected-utility vectors** under experiment $\mu$ is $$ -B(\mu, A) = \Bigl\{v \in \mathbb{R}^N : +B(\mu, A, u) = \Bigl\{v \in \mathbb{R}^N : v_i = \textstyle\int_X u(f(x), s_i)\,\mu_i(dx) \text{ for some measurable } f: X \to A \Bigr\}. $$ -```{admonition} Definition (Economic Criterion — Bonnenblust–Shapley–Sherman) +```{admonition} Definition (Economic criterion) :class: tip $\mu$ is **at least as informative as** $\nu$ in the economic sense if $$ -B(\mu, A) \supseteq B(\nu, A) +B(\mu, A, u) \supseteq B(\nu, A, u) $$ -for every compact convex action set $A$ and every prior $p \in P$. +for every compact convex action set $A$ and every bounded utility function $u: A \times S \to \mathbb{R}$. ``` -Equivalently, every rational decision maker weakly prefers to observe $\tilde{x}_\mu$ -over $\tilde{x}_\nu$. +This criterion says that experiment $\mu$ is better than experiment $\nu$ if anything a decision maker can achieve after seeing $\nu$, they can also achieve after seeing $\mu$. + +The reason is that a more informative experiment lets the agent imitate a less informative one by *ignoring* or *garbling* some of the extra information. + +But the reverse need not be possible. 
-### Criterion 2 — The Sufficiency Criterion (Blackwell) +So $B(\mu, A, u) \supseteq B(\nu, A, u)$ means that $\mu$ gives the decision maker at least as many feasible expected-utility outcomes as $\nu$. -```{admonition} Definition (Blackwell Sufficiency) +Equivalently, every Bayesian decision maker attains weakly higher expected utility with $\tilde{x}_\mu$ than with $\tilde{x}_\nu$, for every prior $p \in P$. + +### Criterion 2: the sufficiency criterion + +```{admonition} Definition (Blackwell sufficiency) :class: tip -$\mu \geq \nu$ in Blackwell's sense if there exists a stochastic transformation -$Q: X \to Y$ such that +$\mu \geq \nu$ in Blackwell's sense if there exists a stochastic transformation $Q$ from the signal space of $\mu$ to the signal space of $\nu$ such that $$ \nu_i(E) = (Q \circ \mu_i)(E) @@ -245,10 +241,9 @@ $$ In matrix notation for finite experiments: $\nu = \mu \, Q$. -### Criterion 3 — The Uncertainty-Function Criterion (DeGroot) +### Criterion 3: the uncertainty criterion -{cite}`degroot1962` calls any **concave** function $U: P \to \mathbb{R}$ an -**uncertainty function**. +{cite:t}`degroot1962` calls any concave function $U: P \to \mathbb{R}$ an **uncertainty function**. The prototypical example is Shannon entropy: @@ -256,67 +251,71 @@ $$ U(p) = -\sum_{i=1}^{N} p_i \log p_i. $$ -```{admonition} Definition (DeGroot Uncertainty Criterion) +```{admonition} Definition (DeGroot uncertainty criterion) :class: tip -$\mu$ **reduces expected uncertainty at least as much as** $\nu$ if, for every -concave $U: P \to \mathbb{R}$, +$\mu$ **reduces expected uncertainty at least as much as** $\nu$ if, for every prior $p \in P$ and every concave $U: P \to \mathbb{R}$, $$ -\int_P U(p)\,\hat\mu^c(dp) +\int_P U(q)\,\hat\mu^p(dq) \;\leq\; -\int_P U(p)\,\hat\nu^c(dp), +\int_P U(q)\,\hat\nu^p(dq), $$ -where $\hat\mu^c$ is the distribution of the posterior induced by experiment $\mu$ -starting from the uniform prior $c = (1/N, \ldots, 1/N)$. 
+where $\hat\mu^p$ is the distribution of posterior beliefs induced by experiment $\mu$ under prior $p$. ``` -Jensen's inequality guarantees that observing any signal *weakly* reduces expected -uncertainty ($\int U(p^\mu)\,d\hat\mu^c \leq U(c)$). The criterion asks whether $\mu$ -always reduces it *at least as much* as $\nu$. +To see this, let $Q = p^\mu(X)$ denote the random posterior induced by experiment $\mu$. ---- +Then $Q$ has distribution $\hat\mu^p$, so + +$$ +\mathbb{E}[U(Q)] = \int_P U(q)\,\hat\mu^p(dq). +$$ -## The Main Theorem +Since $U$ is concave, Jensen's inequality gives -```{admonition} Theorem (Blackwell 1951, 1953; Bonnenblust et al. 1949; DeGroot 1962) +$$ +\mathbb{E}[U(Q)] \leq U(\mathbb{E}[Q]) = U(p). +$$ + +Hence + +$$ +\int_P U(q)\,\hat\mu^p(dq) \leq U(p), +$$ + +so any experiment weakly lowers expected uncertainty. + +Kihlstrom's standard-experiment construction will later let us compare posterior distributions under the uniform prior $c = (1 / N, \ldots, 1 / N)$. + +## The main theorem + +```{admonition} Theorem (Blackwell 1953; see also Blackwell 1951, Bonnenblust et al. 1949, and DeGroot 1962) :class: important The following three conditions are equivalent: -(i) **Economic criterion:** $B(\mu, A) \supseteq B(\nu, A)$ for every compact - convex $A$ and every prior $p \in P$. +(i) Economic criterion: $B(\mu, A, u) \supseteq B(\nu, A, u)$ for every compact convex $A$ and every bounded utility function $u$. -(ii) **Sufficiency criterion:** There exists a stochastic transformation $Q$ from $X$ - to $Y$ such that $\nu = Q \circ \mu$. +(ii) Sufficiency criterion: There exists a stochastic transformation $Q$ from the signal space of $\mu$ to the signal space of $\nu$ such that $\nu = Q \circ \mu$. -(iii) **Uncertainty criterion:** $\int_P U(p)\,\hat\mu^c(dp) \leq \int_P U(p)\,\hat\nu^c(dp)$ - for every concave $U$ and the uniform prior $c$. 
+(iii) Uncertainty criterion: $\int_P U(q)\,\hat\mu^p(dq) \leq \int_P U(q)\,\hat\nu^p(dq)$ for every prior $p \in P$ and every concave $U$. ``` -The proof establishes the chain -(i) $\Leftrightarrow$ (ii) $\Leftrightarrow$ (iii). +The hard part is the equivalence between the economic and sufficiency criteria. -**Sketch (ii $\Rightarrow$ i):** If $\nu = \mu Q$, any decision rule $f$ for $\tilde{x}_\nu$ -can be replicated by first observing $\tilde{x}_\mu$, drawing $\tilde{x}_\nu \sim Q(\tilde{x}_\mu, \cdot)$, -then applying $f$. Hence $B(\nu, A) \subseteq B(\mu, A)$. +*Sketch (ii $\Rightarrow$ i):* If $\nu = \mu Q$, then any decision rule based on $\tilde{x}_\nu$ can be replicated by first observing $\tilde{x}_\mu$, then drawing a synthetic $\tilde{x}_\nu$ from $Q$, and then applying the same rule. -**Sketch (i $\Rightarrow$ ii):** This uses a separating-hyperplane argument. Since -$B(\mu, A) \supseteq B(\nu, A)$ for every $A$, standard duality implies the existence -of a mean-preserving stochastic transformation $D$ mapping posteriors of $\nu$ to -posteriors of $\mu$, which constructs the required $Q$. +*Sketch (i $\Rightarrow$ ii):* Since $B(\mu, A, u) \supseteq B(\nu, A, u)$ for every $A$ and $u$, a separating-hyperplane (duality) argument implies the existence of a mean-preserving stochastic transformation $D$ mapping posteriors of $\nu$ to posteriors of $\mu$, which constructs the required $Q$. -**Sketch (ii $\Leftrightarrow$ iii):** Given $Q$, Jensen's inequality applied to any -concave $U$ gives $\mathbb{E}[U(p^\mu)] \leq \mathbb{E}[U(p^\nu)]$. The converse — -that the condition for all concave $U$ forces the existence of $Q$ — is proved in -{cite}`blackwell1953`. +*Sketch (ii $\Rightarrow$ iii):* Under a garbling, the posterior from the coarser experiment is the conditional expectation of the posterior from the finer experiment, so Jensen's inequality gives the result for every concave $U$. 
---- +*Sketch (iii $\Rightarrow$ ii):* The converse — that the inequality for all concave $U$ forces the existence of $Q$ — is proved in {cite}`blackwell1953`, and Kihlstrom's posterior-based representation makes the geometry transparent. -## Kihlstrom's Bayesian Interpretation +## Kihlstrom's Bayesian interpretation ### Posteriors and standard experiments -The central object in Kihlstrom's analysis is the **posterior belief vector**. +The key object in Kihlstrom's analysis is the *posterior belief vector*. When prior $p$ holds and experiment $\mu$ produces signal $x$, Bayes' rule gives @@ -325,7 +324,7 @@ p_i^\mu(x) = \Pr(\tilde{s} = s_i \mid \tilde{x}_\mu = x) = \frac{\mu_{ix} \, p_i}{\sum_j \mu_{jx}\, p_j}, \qquad i = 1, \ldots, N. $$ -The posterior $p^\mu(x) \in P$ is a *random variable* on the simplex. +The posterior $p^\mu(x) \in P$ is a random point in the simplex. ```{admonition} Key property (mean preservation) :class: note @@ -335,37 +334,34 @@ $$ \mathbb{E}[p^\mu] = \sum_x \Pr(\tilde{x}_\mu = x)\, p^\mu(x) = p. $$ -This is sometimes called the **law of iterated expectations for beliefs**. +This is sometimes called the *law of iterated expectations for beliefs*. ``` -The **standard experiment** ${}^c\mu^*$ records only the posterior: it maps the -prior $c$ to the random variable $p^\mu(x) \in P$. Its distribution $\hat\mu^c$ -on $P$ satisfies $\int_P p\;\hat\mu^c(dp) = c$. +For a fixed prior $c$, Kihlstrom's **standard experiment** ${}^c\mu^*$ records only the posterior generated by $\mu$. + +Its distribution $\hat\mu^c$ on $P$ satisfies $\int_P q \, \hat\mu^c(dq) = c$. -Two experiments are **informationally equivalent** when they induce the same -distribution of posteriors. The standard experiment is the minimal sufficient -statistic for $\mu$. +Two experiments are **informationally equivalent** when they induce the same posterior distribution. 
+ +The standard experiment strips away every detail of the signal except its posterior, so it is a *minimal sufficient statistic* for the comparison of experiments. + +Any two experiments that generate the same distribution over posteriors lead to identical decisions for every Bayesian decision maker, regardless of how different their raw signal spaces may look. ```{code-cell} ipython3 -def compute_posteriors(mu, prior): +def compute_posteriors(μ, prior, tol=1e-14): """ Compute the posterior distribution for each signal realisation. - - Parameters - ---------- - mu : (N, M) Markov matrix — mu[i, j] = Pr(signal j | state i) - prior : (N,) prior probabilities over states - - Returns - ------- - posteriors : (M, N) array — posteriors[j] = posterior given signal j - signal_probs : (M,) marginal probability of each signal """ - N, M = mu.shape - # Marginal probability of each signal: Pr(x_j) = sum_i Pr(x_j|s_i)*p_i - signal_probs = mu.T @ prior # shape (M,) - # Posterior: p(s_i | x_j) = mu[i,j]*p[i] / Pr(x_j) - posteriors = (mu.T * prior) / signal_probs[:, None] # shape (M, N) + N, M = μ.shape + signal_probs = μ.T @ prior + numerators = μ.T * prior + posteriors = np.zeros((M, N)) + np.divide( + numerators, + signal_probs[:, None], + out=posteriors, + where=signal_probs[:, None] > tol, + ) return posteriors, signal_probs @@ -375,142 +371,136 @@ def check_mean_preservation(posteriors, signal_probs, prior): return expected_posterior, np.allclose(expected_posterior, prior) -# Two-state example: states s1, s2 N = 2 prior = np.array([0.5, 0.5]) -# More informative experiment μ -mu_info = np.array([[0.8, 0.2], - [0.2, 0.8]]) +μ_info = np.array([[0.8, 0.2], + [0.2, 0.8]]) -# Less informative experiment ν -nu_info = np.array([[0.6, 0.4], - [0.4, 0.6]]) +ν_info = np.array([[0.6, 0.4], + [0.4, 0.6]]) -post_mu, probs_mu = compute_posteriors(mu_info, prior) -post_nu, probs_nu = compute_posteriors(nu_info, prior) +post_μ, probs_μ = compute_posteriors(μ_info, prior) +post_ν, probs_ν = 
compute_posteriors(ν_info, prior) print("=== Experiment μ (more informative) ===") -print("Signal probabilities:", probs_mu.round(3)) +print("Signal probabilities:", probs_μ.round(3)) print("Posteriors (row = signal, col = state):") -print(post_mu.round(3)) -mean_mu, ok_mu = check_mean_preservation(post_mu, probs_mu, prior) -print(f"E[posterior] = {mean_mu.round(4)} (equals prior: {ok_mu})") +print(post_μ.round(3)) +mean_μ, ok_μ = check_mean_preservation(post_μ, probs_μ, prior) +print(f"E[posterior] = {mean_μ.round(4)} (equals prior: {ok_μ})") print("\n=== Experiment ν (less informative) ===") -print("Signal probabilities:", probs_nu.round(3)) +print("Signal probabilities:", probs_ν.round(3)) print("Posteriors:") -print(post_nu.round(3)) -mean_nu, ok_nu = check_mean_preservation(post_nu, probs_nu, prior) -print(f"E[posterior] = {mean_nu.round(4)} (equals prior: {ok_nu})") +print(post_ν.round(3)) +mean_ν, ok_ν = check_mean_preservation(post_ν, probs_ν, prior) +print(f"E[posterior] = {mean_ν.round(4)} (equals prior: {ok_ν})") ``` -### Visualising posterior distributions on the simplex +### Visualizing posterior distributions on the simplex For $N = 2$ states, the simplex $P$ is the unit interval $[0, 1]$ (the probability of state $s_1$). We can directly plot the distribution of posteriors under experiments $\mu$ and $\nu$. ```{code-cell} ipython3 -def plot_posterior_distributions(mu_matrix, nu_matrix, prior, +--- +mystnb: + figure: + caption: Posterior distributions in the two-state case + name: fig-blackwell-two-state-posteriors +--- +def plot_posterior_distributions(μ_matrix, ν_matrix, prior, labels=("μ (more informative)", "ν (less informative)")): """ For a two-state experiment, plot the distribution of posteriors (i.e., the standard experiment distribution) on [0,1]. 
""" - posts_mu, probs_mu = compute_posteriors(mu_matrix, prior) - posts_nu, probs_nu = compute_posteriors(nu_matrix, prior) + posts_μ, probs_μ = compute_posteriors(μ_matrix, prior) + posts_ν, probs_ν = compute_posteriors(ν_matrix, prior) fig, axes = plt.subplots(1, 2, figsize=(11, 4), sharey=False) prior_val = prior[0] for ax, posts, probs, label in zip( - axes, [posts_mu, posts_nu], [probs_mu, probs_nu], labels): - p_s1 = posts[:, 0] # posterior prob of state s1 for each signal + axes, [posts_μ, posts_ν], [probs_μ, probs_ν], labels): + p_s1 = posts[:, 0] ax.vlines(p_s1, 0, probs, linewidth=6, color="steelblue", alpha=0.7) - ax.axvline(prior_val, color="tomato", linestyle="--", linewidth=1.5, + ax.axvline(prior_val, color="tomato", linestyle="--", linewidth=2, label=f"prior = {prior_val:.2f}") ax.set_xlim(0, 1) - ax.set_xlabel(r"Posterior $p(s_1 \mid x)$", fontsize=12) - ax.set_ylabel("Probability mass", fontsize=12) - ax.set_title(label, fontsize=12) - ax.legend() - # Annotate mean + ax.set_xlabel(r"posterior $p(s_1 \mid x)$", fontsize=12) + ax.set_ylabel("probability mass", fontsize=12) mean_post = (p_s1 * probs).sum() - ax.axvline(mean_post, color="green", linestyle=":", linewidth=1.5, + ax.axvline(mean_post, color="green", linestyle=":", linewidth=2, label=f"E[post] = {mean_post:.2f}") + ax.text(0.03, 0.94, label, transform=ax.transAxes, va="top") ax.legend() - fig.suptitle("Distribution of posteriors (standard experiment)\n" - "More informative → more dispersed from prior", - fontsize=12, y=1.02) plt.tight_layout() plt.show() -plot_posterior_distributions(mu_info, nu_info, prior) +plot_posterior_distributions(μ_info, ν_info, prior) ``` -The more informative experiment $\mu$ pushes posteriors further from the prior in -both directions, producing a more dispersed distribution on $[0,1]$. +The more informative experiment $\mu$ pushes posteriors farther from the prior in both directions. 
-### Mean-preserving spreads and Blackwell's ordering +### Mean-preserving spreads and Blackwell's order -Kihlstrom's key reformulation is: +Kihlstrom's key reformulation is the following. ```{admonition} Theorem (Kihlstrom's Reformulation) :class: important -$\mu \geq \nu$ in Blackwell's sense **if and only if** $\hat\mu^c$ is a +$\mu \geq \nu$ in Blackwell's sense if and only if $\hat\mu^c$ is a **mean-preserving spread** of $\hat\nu^c$; that is, $$ \int_P g(p)\,\hat\mu^c(dp) \;\geq\; \int_P g(p)\,\hat\nu^c(dp) $$ -for every **convex** function $g: P \to \mathbb{R}$. +for every convex function $g: P \to \mathbb{R}$. ``` -Equivalently, $\hat\mu^c$ **second-order stochastically dominates** $\hat\nu^c$ -(in the sense of mean-preserving spreads). +Equivalently, $\hat\mu^c$ is larger than $\hat\nu^c$ in convex order. -The intuition: a better experiment resolves more uncertainty, spreading posteriors -further from the prior on average. Any convex $g$ assigns higher expected value to -a more dispersed distribution (Jensen's inequality in reverse). +A better experiment spreads posterior beliefs farther from the prior while preserving their mean. ```{code-cell} ipython3 -def check_mps_convex_functions(mu_matrix, nu_matrix, prior, n_functions=200): +--- +mystnb: + figure: + caption: Convex-order check in the two-state case + name: fig-blackwell-convex-order-check +--- +def check_mps_convex_functions(μ_matrix, ν_matrix, prior, n_functions=200): """ - Verify the mean-preserving spread condition for random convex functions. - - We test: E[g(p^μ)] >= E[g(p^ν)] for convex g - using a family of convex functions g(p) = (p - t)^+ = max(p-t, 0). + Verify the mean-preserving spread condition using + convex functions g(p) = max(p - t, 0). 
""" - posts_mu, probs_mu = compute_posteriors(mu_matrix, prior) - posts_nu, probs_nu = compute_posteriors(nu_matrix, prior) + posts_μ, probs_μ = compute_posteriors(μ_matrix, prior) + posts_ν, probs_ν = compute_posteriors(ν_matrix, prior) - p_mu = posts_mu[:, 0] # posteriors on s1 - p_nu = posts_nu[:, 0] + p_μ = posts_μ[:, 0] + p_ν = posts_ν[:, 0] thresholds = np.linspace(0, 1, n_functions) diffs = [] for t in thresholds: - Eg_mu = (np.maximum(p_mu - t, 0) * probs_mu).sum() - Eg_nu = (np.maximum(p_nu - t, 0) * probs_nu).sum() - diffs.append(Eg_mu - Eg_nu) + Eg_μ = (np.maximum(p_μ - t, 0) * probs_μ).sum() + Eg_ν = (np.maximum(p_ν - t, 0) * probs_ν).sum() + diffs.append(Eg_μ - Eg_ν) fig, ax = plt.subplots(figsize=(8, 4)) ax.plot(thresholds, diffs, color="steelblue", linewidth=2) - ax.axhline(0, color="tomato", linestyle="--") + ax.axhline(0, color="tomato", linestyle="--", linewidth=2) ax.fill_between(thresholds, diffs, 0, where=np.array(diffs) >= 0, alpha=0.25, color="steelblue", - label=r"$E[g(p^\mu)] - E[g(p^\nu)] \geq 0$") - ax.set_xlabel("Threshold $t$", fontsize=12) + label="$E[g(p^μ)] - E[g(p^ν)] \\geq 0$") + ax.set_xlabel("threshold $t$", fontsize=12) ax.set_ylabel(r"$E[\max(p-t,0)]$ difference", fontsize=12) - ax.set_title( - r"Mean-preserving spread check: $E[g(p^\mu)] \geq E[g(p^\nu)]$" - "\nfor convex functions $g(p) = \max(p - t, 0)$", - fontsize=11) ax.legend(fontsize=11) plt.tight_layout() plt.show() @@ -519,64 +509,60 @@ def check_mps_convex_functions(mu_matrix, nu_matrix, prior, n_functions=200): print(f"μ is a mean-preserving spread of ν: {all_non_negative}") return diffs -_ = check_mps_convex_functions(mu_info, nu_info, prior) +_ = check_mps_convex_functions(μ_info, ν_info, prior) ``` ---- - -## Simulating the Blackwell Ordering with Many States +## Simulating the Blackwell order with many states -To move beyond two states we simulate richer experiments. +We now move to a three-state example. 
-We take $N = 3$ states and compare a more-informative experiment $\mu$ (whose -signal is strongly correlated with the state) against a less-informative $\nu$ -(a garbling of $\mu$). +Experiment $\mu$ is strongly correlated with the state, and experiment $\nu$ is a garbling of $\mu$. ```{code-cell} ipython3 -# Three states, three signals N3 = 3 prior3 = np.array([1/3, 1/3, 1/3]) -# Informative experiment: strong diagonal -mu3 = np.array([[0.7, 0.2, 0.1], - [0.1, 0.7, 0.2], - [0.2, 0.1, 0.7]]) +μ3 = np.array([[0.7, 0.2, 0.1], + [0.1, 0.7, 0.2], + [0.2, 0.1, 0.7]]) -# Garbling matrix: merge signals 2 and 3 Q3 = np.array([[0.9, 0.05, 0.05], [0.05, 0.8, 0.15], - [0.05, 0.15, 0.8]]) # row-stochastic + [0.05, 0.15, 0.8]]) -nu3 = mu3 @ Q3 +ν3 = μ3 @ Q3 print("μ (3×3):") -print(np.round(mu3, 2)) +print(np.round(μ3, 2)) print("\nQ (garbling):") print(np.round(Q3, 2)) print("\nν = μ @ Q:") -print(np.round(nu3, 3)) +print(np.round(ν3, 3)) ``` -### Plotting posterior clouds on the 2-simplex -For three states, posteriors live in a 2-simplex (a triangle). We draw many -samples from each experiment and plot where the posteriors land. +For three states, posterior beliefs live in a 2-simplex. + +Let's visualize the posterior clouds under $\mu$ and $\nu$ ```{code-cell} ipython3 -def sample_posteriors(mu_matrix, prior, n_draws=3000): +--- +mystnb: + figure: + caption: Posterior clouds on the 2-simplex + name: fig-blackwell-simplex-clouds +--- +def sample_posteriors(μ_matrix, prior, n_draws=3000): """ Simulate n_draws observations from the experiment and compute the resulting posterior beliefs. Returns array of shape (n_draws, N). 
""" - N, M = mu_matrix.shape - # Draw a state + N, M = μ_matrix.shape states = np.random.choice(N, size=n_draws, p=prior) - # Draw a signal conditioned on the state - signals = np.array([np.random.choice(M, p=mu_matrix[s]) for s in states]) - # Compute posterior - posteriors, signal_probs = compute_posteriors(mu_matrix, prior) - return posteriors[signals] # shape (n_draws, N) + signals = np.array([np.random.choice(M, p=μ_matrix[s]) for s in states]) + posteriors, _ = compute_posteriors(μ_matrix, prior) + return posteriors[signals] def simplex_to_cart(pts): @@ -587,27 +573,26 @@ def simplex_to_cart(pts): return pts @ corners -def plot_simplex_posteriors(mu_matrix, nu_matrix, prior3, n_draws=3000): - posts_mu = sample_posteriors(mu_matrix, prior3, n_draws) - posts_nu = sample_posteriors(nu_matrix, prior3, n_draws) +def plot_simplex_posteriors(μ_matrix, ν_matrix, prior3, n_draws=3000): + posts_μ = sample_posteriors(μ_matrix, prior3, n_draws) + posts_ν = sample_posteriors(ν_matrix, prior3, n_draws) - cart_mu = simplex_to_cart(posts_mu) - cart_nu = simplex_to_cart(posts_nu) + cart_μ = simplex_to_cart(posts_μ) + cart_ν = simplex_to_cart(posts_ν) prior_cart = simplex_to_cart(prior3[None, :])[0] corners = np.array([[0.0, 0.0], [1.0, 0.0], [0.5, np.sqrt(3)/2]]) - triangle = plt.Polygon(corners, fill=False, edgecolor="black", linewidth=1.5) fig, axes = plt.subplots(1, 2, figsize=(12, 5)) - titles = ["μ (more informative)", "ν (less informative / garbled)"] - data = [(cart_mu, "steelblue"), (cart_nu, "darkorange")] + panel_labels = ["μ (more informative)", "ν (garbled)"] + data = [(cart_μ, "steelblue"), (cart_ν, "darkorange")] labels = ["$s_1$", "$s_2$", "$s_3$"] - offsets = [(-0.07, -0.05), (1.02, -0.05), (0.48, np.sqrt(3)/2 + 0.03)] + offsets = [(-0.07, -0.05), (0.02, -0.05), (-0.02, 0.03)] - for ax, (cart, c), title in zip(axes, data, titles): - tri = plt.Polygon(corners, fill=False, edgecolor="black", linewidth=1.5) + for ax, (cart, c), panel_label in zip(axes, data, 
panel_labels): + tri = plt.Polygon(corners, fill=False, edgecolor="black", linewidth=2) ax.add_patch(tri) ax.scatter(cart[:, 0], cart[:, 1], s=4, alpha=0.25, color=c) ax.scatter(*prior_cart, s=120, color="red", zorder=5, @@ -618,34 +603,32 @@ def plot_simplex_posteriors(mu_matrix, nu_matrix, prior3, n_draws=3000): ax.set_xlim(-0.15, 1.15) ax.set_ylim(-0.1, np.sqrt(3)/2 + 0.1) ax.set_aspect("equal") - ax.axis("off") - ax.set_title(title, fontsize=12) + ax.set_xticks([]) + ax.set_yticks([]) + ax.text(0.03, 0.94, panel_label, transform=ax.transAxes, va="top") ax.legend(fontsize=11, loc="upper right") - fig.suptitle("Posterior clouds on the 2-simplex\n" - "More informative experiment → posteriors spread further from prior", - fontsize=12) plt.tight_layout() plt.show() -plot_simplex_posteriors(mu3, nu3, prior3) +plot_simplex_posteriors(μ3, ν3, prior3) ``` -The posteriors under $\mu$ cluster near the vertices (near-certain beliefs), while -those under the garbled $\nu$ cluster closer to the centre (the prior). +Under $\mu$, the posterior cloud reaches farther toward the vertices. ---- +Under the garbled experiment $\nu$, the cloud stays closer to the center. -## The DeGroot Uncertainty Function +## The DeGroot uncertainty function ### Concave uncertainty functions and the value of information -{cite}`degroot1962` formalises the value of information through an **uncertainty function** -$U: P \to \mathbb{R}$ that must be: +{cite}`degroot1962` formalizes the value of information through an **uncertainty function** $U: P \to \mathbb{R}$. + +In DeGroot's axiomatization, an uncertainty function is: -- **Concave**: by Jensen, observing any signal weakly reduces expected uncertainty. -- **Symmetric**: depends on the components of $p$, not their labelling. -- **Normalised**: maximised at $p = (1/N, \ldots, 1/N)$ and minimised at vertices. +- *Concave*: by Jensen, observing any signal weakly reduces expected uncertainty. 
+- *Symmetric*: it depends on the components of $p$, not their labeling. +- *Normalized*: it is maximized at $p = (1/N, \ldots, 1/N)$ and minimized at vertices. The **value of experiment $\mu$ given prior $p$** is @@ -654,9 +637,9 @@ I(\tilde{x}_\mu;\, \tilde{s};\, U) = U(p) - \mathbb{E}[U(p^\mu)], $$ -the expected reduction in uncertainty. A key result is that $\mu \geq \nu$ **if and -only if** $I(\tilde{x}_\mu; \tilde{s}; U) \geq I(\tilde{x}_\nu; \tilde{s}; U)$ for -**every** concave $U$. +This quantity is the expected reduction in uncertainty. + +Blackwell's order is equivalent to the statement that $I(\tilde{x}_\mu; \tilde{s}; U) \geq I(\tilde{x}_\nu; \tilde{s}; U)$ for *every* concave $U$. ### Shannon entropy as a special case @@ -673,71 +656,75 @@ I(\tilde{x}_\mu, c;\, U_H) = \log N - H(\tilde{s} \mid \tilde{x}_\mu), $$ -where $H(\tilde{s} \mid \tilde{x}_\mu)$ is the conditional entropy of the state given -the signal — exactly the **mutual information** between $\tilde{x}_\mu$ and $\tilde{s}$. +where $H(\tilde{s} \mid \tilde{x}_\mu)$ is the conditional entropy of the state given the signal. + +To see why, write $H(\tilde{s} \mid \tilde{x}_\mu) = \sum_x \Pr(\tilde{x}_\mu = x) \, H(\tilde{s} \mid \tilde{x}_\mu = x)$, where each conditional entropy term equals $-\sum_i p_i^\mu(x) \log p_i^\mu(x) = U_H(p^\mu(x))$. + +Substituting into DeGroot's formula gives $I = U_H(c) - \mathbb{E}[U_H(p^\mu)] = \log N - H(\tilde{s} \mid \tilde{x}_\mu)$, which is exactly the *mutual information* between $\tilde{x}_\mu$ and $\tilde{s}$. ```{note} -The Blackwell ordering implies the entropy-based inequality, but the *converse fails*: -entropy alone does not pin down the full Blackwell ordering — you need the inequality -for **every** concave $U$. +The Blackwell ordering implies the entropy-based inequality, but the *converse fails*: entropy alone does not pin down the full Blackwell ordering. 
+ +Two experiments can have the same mutual information yet differ in Blackwell rank, because a single concave function cannot detect all differences in the dispersion of posteriors. + +The full Blackwell ordering requires the inequality to hold for *every* concave $U$, not just Shannon entropy. ``` ```{code-cell} ipython3 -def entropy(p, eps=1e-12): +def entropy(p, ε=1e-12): """Shannon entropy of a probability vector.""" p = np.asarray(p, dtype=float) - p = np.clip(p, eps, 1.0) + p = np.clip(p, ε, 1.0) return -np.sum(p * np.log(p)) -def degroot_value(mu_matrix, prior, U_func): +def degroot_value(μ_matrix, prior, U_func): """ Compute DeGroot's value of information I = U(prior) - E[U(posterior)]. """ - posts, probs = compute_posteriors(mu_matrix, prior) + posts, probs = compute_posteriors(μ_matrix, prior) prior_uncertainty = U_func(prior) expected_post_uncertainty = sum( probs[j] * U_func(posts[j]) for j in range(len(probs))) return prior_uncertainty - expected_post_uncertainty -# --- Several concave uncertainty functions --- def gini_impurity(p): """Gini impurity: 1 - sum(p_i^2).""" return 1.0 - np.sum(np.asarray(p)**2) + def tsallis_entropy(p, q=2): """Tsallis entropy of order q (concave for q>1).""" p = np.clip(p, 1e-12, 1.0) return (1 - np.sum(p**q)) / (q - 1) -def min_entropy(p): - """Min-entropy: -log(max(p)).""" - return -np.log(np.max(np.clip(p, 1e-12, 1.0))) + +def sqrt_index(p): + """Concave uncertainty index based on sum(sqrt(p_i)).""" + p = np.clip(np.asarray(p), 0.0, 1.0) + return np.sum(np.sqrt(p)) - 1.0 uncertainty_functions = { "Shannon entropy": entropy, "Gini impurity": gini_impurity, "Tsallis (q=2)": tsallis_entropy, - "Min-entropy": min_entropy, + "Square-root index": sqrt_index, } -print(f"{'Uncertainty function':<22} {'I(μ)':<10} {'I(ν)':<10} {'I(μ)≥I(ν)?'}") +print(f"{'Uncertainty function':<22} {'I(μ)':<10} {'I(ν)':<10} {'I(μ)>=I(ν)?'}") print("-" * 58) for name, U in uncertainty_functions.items(): - I_mu = degroot_value(mu_info, prior, U) - 
I_nu = degroot_value(nu_info, prior, U) - print(f"{name:<22} {I_mu:<10.4f} {I_nu:<10.4f} {I_mu >= I_nu - 1e-10}") + I_μ = degroot_value(μ_info, prior, U) + I_ν = degroot_value(ν_info, prior, U) + print(f"{name:<22} {I_μ:<10.4f} {I_ν:<10.4f} {I_μ >= I_ν - 1e-10}") ``` -As predicted by the theorem, $I(\mu) \geq I(\nu)$ for every concave uncertainty -function once we know $\mu \geq \nu$ in the Blackwell sense. +As predicted by the theorem, $I(\mu) \geq I(\nu)$ for every concave uncertainty function once we know $\mu \geq \nu$ in the Blackwell sense. ### Value of information as a function of experiment quality -We now parameterise a continuum of experiments that interpolate between the -completely uninformative experiment (signal is independent of the state) and the -perfectly informative one (signal perfectly reveals the state). +We now parameterize a continuum of experiments between the uninformative and perfectly informative cases. For $N = 2$ states, a natural family is @@ -747,48 +734,43 @@ $$ \quad \theta \in [0, 1], $$ -where the first term is the completely mixed (uninformative) matrix and $I_2$ is the -identity (perfectly informative). +The first term is the completely mixed matrix and $I_2$ is the identity. ```{code-cell} ipython3 -def make_experiment(theta, N=2): - """ - Parameterised experiment: theta=0 is uninformative, theta=1 is perfect. 
- mu(theta) = (1-theta)*(1/N)*ones + theta*I - """ - return (1 - theta) * np.ones((N, N)) / N + theta * np.eye(N) +--- +mystnb: + figure: + caption: Value of information and experiment quality + name: fig-blackwell-value-by-quality +--- +def make_experiment(θ, N=2): + """Parameterized experiment: θ=0 is uninformative, θ=1 is perfect.""" + return (1 - θ) * np.ones((N, N)) / N + θ * np.eye(N) -thetas = np.linspace(0, 1, 100) +θs = np.linspace(0, 1, 100) prior2 = np.array([0.5, 0.5]) fig, ax = plt.subplots(figsize=(9, 4)) for name, U in uncertainty_functions.items(): - values = [degroot_value(make_experiment(t), prior2, U) for t in thetas] - # Normalise to [0,1] for comparability across functions + values = [degroot_value(make_experiment(θ), prior2, U) for θ in θs] vmin, vmax = values[0], values[-1] normed = (np.array(values) - vmin) / (vmax - vmin + 1e-15) - ax.plot(thetas, normed, label=name, linewidth=2) + ax.plot(θs, normed, label=name, linewidth=2) -ax.set_xlabel(r"Experiment quality $\theta$ (0 = uninformative, 1 = perfect)", +ax.set_xlabel("experiment quality θ (0 = uninformative, 1 = perfect)", fontsize=11) -ax.set_ylabel("Normalised value of information $I(\\mu(\\theta))$", fontsize=11) -ax.set_title("Value of information rises monotonically with experiment quality\n" - "for every concave uncertainty function", fontsize=11) +ax.set_ylabel("normalized value of information I(μ(θ))", fontsize=11) ax.legend(fontsize=10) plt.tight_layout() plt.show() ``` -Every concave uncertainty function assigns weakly higher value to a more informative -experiment — a graphical illustration of the equivalence (i) $\Leftrightarrow$ (iii). - ---- +Every concave uncertainty function assigns weakly higher value to a more informative experiment. -## Connection to Second-Order Stochastic Dominance +## Connection to second-order stochastic dominance -The uncertainty-function representation makes the connection to **second-order -stochastic dominance (SOSD)** explicit. 
+The uncertainty-function representation makes the connection to **second-order stochastic dominance (SOSD)** explicit. Because $U$ is concave, $-U$ is convex, and the condition @@ -796,284 +778,270 @@ $$ \mathbb{E}[U(p^\mu)] \leq \mathbb{E}[U(p^\nu)] \quad \text{for all concave } U $$ -is precisely the statement that $\hat\mu^c$ dominates $\hat\nu^c$ in the -**mean-preserving spread** sense on $P$. +is precisely the statement that $\hat\mu^c$ dominates $\hat\nu^c$ in the **mean-preserving spread** sense on $P$. + +The Blackwell ordering on *experiments* is therefore isomorphic to the SOSD ordering on *distributions of posteriors*. + +When $N = 2$, posterior beliefs are scalars in $[0, 1]$, and the SOSD comparison reduces to the classical integrated-CDF test. -The Blackwell ordering on *experiments* is therefore isomorphic to the SOSD ordering -on *distributions of posteriors*. +Specifically, $\hat\mu^c$ is a mean-preserving spread of $\hat\nu^c$ if and only if $\int_0^t F_\nu(s)\,ds \geq \int_0^t F_\mu(s)\,ds$ for all $t \in [0,1]$, where $F_\mu$ and $F_\nu$ are the CDFs of the posterior on $s_1$ under each experiment. + +We can verify this graphically for the two-state example above ```{code-cell} ipython3 -def lorenz_curve_1d(weights, values): - """ - Compute the Lorenz-like CDF used for SOSD comparisons. - Returns (sorted values, cumulative probability mass). - """ +--- +mystnb: + figure: + caption: Integrated-CDF check in the two-state case + name: fig-blackwell-integrated-cdf +--- +def cdf_data_1d(weights, values): + """Sort support points and cumulative masses for a discrete distribution.""" idx = np.argsort(values) sorted_vals = values[idx] - sorted_wts = weights[idx] + sorted_wts = weights[idx] cum_mass = np.cumsum(sorted_wts) return sorted_vals, cum_mass -def plot_sosd_posteriors(mu_matrix, nu_matrix, prior, title=""): - """ - Plot the CDFs of the posterior-on-s1 distributions under mu and nu, - and verify SOSD (mu dominates nu in the MPS sense). 
- """ - posts_mu, probs_mu = compute_posteriors(mu_matrix, prior) - posts_nu, probs_nu = compute_posteriors(nu_matrix, prior) +def plot_sosd_posteriors(μ_matrix, ν_matrix, prior): + """Plot CDFs and integrated CDFs for the posterior-on-s1 distributions.""" + posts_μ, probs_μ = compute_posteriors(μ_matrix, prior) + posts_ν, probs_ν = compute_posteriors(ν_matrix, prior) - p_mu = posts_mu[:, 0] - p_nu = posts_nu[:, 0] + p_μ = posts_μ[:, 0] + p_ν = posts_ν[:, 0] - sv_mu, cm_mu = lorenz_curve_1d(probs_mu, p_mu) - sv_nu, cm_nu = lorenz_curve_1d(probs_nu, p_nu) + sv_μ, cm_μ = cdf_data_1d(probs_μ, p_μ) + sv_ν, cm_ν = cdf_data_1d(probs_ν, p_ν) fig, axes = plt.subplots(1, 2, figsize=(11, 4)) - # Left: CDFs ax = axes[0] - for sv, cm, lbl, c in [(sv_mu, cm_mu, "μ", "steelblue"), - (sv_nu, cm_nu, "ν", "darkorange")]: + for sv, cm, lbl, c in [(sv_μ, cm_μ, "μ", "steelblue"), + (sv_ν, cm_ν, "ν", "darkorange")]: xs = np.concatenate([[0], sv, [1]]) ys = np.concatenate([[0], cm, [1]]) ax.step(xs, ys, where="post", label=lbl, color=c, linewidth=2) - ax.set_xlabel(r"Posterior $p(s_1 \mid x)$", fontsize=12) - ax.set_ylabel("Cumulative probability", fontsize=12) - ax.set_title("CDFs of posterior distributions", fontsize=11) - ax.legend(fontsize=11) - ax.axvline(prior[0], linestyle="--", color="gray", alpha=0.6, + ax.axvline(prior[0], linestyle="--", color="gray", alpha=0.6, linewidth=2, label="prior") + ax.set_xlabel(r"posterior $p(s_1 \mid x)$", fontsize=12) + ax.set_ylabel("cumulative probability", fontsize=12) + ax.text(0.03, 0.94, "CDFs", transform=ax.transAxes, va="top") + ax.legend(fontsize=11) - # Right: integrated CDFs (SOSD criterion: F_nu integrates >= F_mu) ax2 = axes[1] grid = np.linspace(0, 1, 200) def integrated_cdf(sorted_vals, cum_mass, grid): - # CDF at each grid point cdf = np.array([cum_mass[sorted_vals <= t].max() if np.any(sorted_vals <= t) else 0.0 for t in grid]) return np.cumsum(cdf) * (grid[1] - grid[0]) - int_mu = integrated_cdf(sv_mu, cm_mu, grid) - int_nu = 
integrated_cdf(sv_nu, cm_nu, grid) + int_μ = integrated_cdf(sv_μ, cm_μ, grid) + int_ν = integrated_cdf(sv_ν, cm_ν, grid) - ax2.plot(grid, int_mu, label="∫F_μ", color="steelblue", linewidth=2) - ax2.plot(grid, int_nu, label="∫F_ν", color="darkorange", linewidth=2) - ax2.fill_between(grid, int_mu, int_nu, - where=int_nu >= int_mu, + ax2.plot(grid, int_μ, label=r"$\int F_\mu$", color="steelblue", linewidth=2) + ax2.plot(grid, int_ν, label=r"$\int F_\nu$", color="darkorange", linewidth=2) + ax2.fill_between(grid, int_μ, int_ν, + where=int_ν >= int_μ, alpha=0.2, color="darkorange", - label="∫F_ν ≥ ∫F_μ (μ MPS-dominates ν)") + label=r"$\int F_\nu \geq \int F_\mu$ ($\mu$ MPS-dominates $\nu$)") ax2.set_xlabel(r"$t$", fontsize=12) - ax2.set_ylabel("Integrated CDF", fontsize=12) - ax2.set_title("SOSD: integrated CDFs\n(μ dominates ν iff ∫F_ν ≥ ∫F_μ everywhere)", - fontsize=11) + ax2.set_ylabel("integrated CDF", fontsize=12) + ax2.text(0.03, 0.94, "integrated CDFs", transform=ax2.transAxes, va="top") ax2.legend(fontsize=10) - fig.suptitle(title or "Second-order stochastic dominance of posterior distributions", - fontsize=11, y=1.01) plt.tight_layout() plt.show() -plot_sosd_posteriors(mu_info, nu_info, prior, - title="μ is a mean-preserving spread of ν:\n" - "μ second-order stochastically dominates ν") +plot_sosd_posteriors(μ_info, ν_info, prior) ``` ---- - -## The Stochastic Transformation as a Mean-Preserving Randomisation +## Mean-preserving randomization Kihlstrom proves that (i) $\Rightarrow$ (ii) by explicit construction. -Given that $\mu$ achieves at least the value of $\nu$ for every user, he constructs -a stochastic transformation $D(p^0, \cdot)$ on $P$ that is **mean-preserving**: +Given that $\mu$ achieves at least the value of $\nu$ for every decision maker, he constructs a stochastic transformation $D(p^0, \cdot)$ on $P$ that is **mean-preserving**: $$ -\int_P p\; D(p^0, dp) = p^0. +\int_P q \, D(p^0, dq) = p^0. 
$$ Setting $Q = D$ provides the Markov kernel witnessing Blackwell sufficiency. -The mean-preservation condition says: passing $\tilde{x}_\mu$ through $Q$ to -produce a synthetic $\tilde{x}_\nu$ cannot add information — it only destroys it. +The mean-preservation condition says: passing $\tilde{x}_\mu$ through $Q$ to produce a synthetic $\tilde{x}_\nu$ cannot add information — it only destroys it. ```{code-cell} ipython3 -def verify_garbling_mean_preservation(mu_matrix, Q_matrix, prior): - """ - Verify that the garbling Q is mean-preserving: - E[posterior under ν] = E[posterior under μ]. - Both should equal the prior. - """ - nu_matrix = mu_matrix @ Q_matrix - posts_mu, probs_mu = compute_posteriors(mu_matrix, prior) - posts_nu, probs_nu = compute_posteriors(nu_matrix, prior) +def verify_garbling_mean_preservation(μ_matrix, Q_matrix, prior): + """Verify that a garbling preserves the prior as the mean posterior.""" + ν_matrix = μ_matrix @ Q_matrix + posts_μ, probs_μ = compute_posteriors(μ_matrix, prior) + posts_ν, probs_ν = compute_posteriors(ν_matrix, prior) - mean_mu = (posts_mu * probs_mu[:, None]).sum(axis=0) - mean_nu = (posts_nu * probs_nu[:, None]).sum(axis=0) + mean_μ = (posts_μ * probs_μ[:, None]).sum(axis=0) + mean_ν = (posts_ν * probs_ν[:, None]).sum(axis=0) print(f"Prior: {prior.round(4)}") - print(f"E[p^μ]: {mean_mu.round(4)}") - print(f"E[p^ν = p^(μQ)]: {mean_nu.round(4)}") - print(f"Both equal prior? mu: {np.allclose(mean_mu, prior)}, " - f"nu: {np.allclose(mean_nu, prior)}") + print(f"E[p^μ]: {mean_μ.round(4)}") + print(f"E[p^ν = p^(μQ)]: {mean_ν.round(4)}") + print(f"Both equal prior? 
μ: {np.allclose(mean_μ, prior)}, " + f"ν: {np.allclose(mean_ν, prior)}") -# Q_true maps 2 signals -> 2 signals (a softening garbling) Q_soft = np.array([[0.7, 0.3], [0.3, 0.7]]) -verify_garbling_mean_preservation(mu_info, Q_soft, prior) +verify_garbling_mean_preservation(μ_info, Q_soft, prior) ``` ---- +## Comparing experiments systematically -## Comparing Experiments: A Systematic Example +We now study a grid of experiments indexed by their quality parameter $\theta$. -We now study a grid of experiments indexed by their quality parameter $\theta$ -and verify that the Blackwell ordering is faithfully reflected in: +We will compare: -1. The spread of posteriors (mean-preserving spread check). -2. The value of information under every concave $U$. -3. The SOSD ranking of posterior distributions. +1. The spread of posterior beliefs. +2. The value of information under concave uncertainty functions. +3. The integrated-CDF ranking in the two-state case. ```{code-cell} ipython3 -thetas_grid = [0.1, 0.4, 0.7, 1.0] +--- +mystnb: + figure: + caption: Posterior distributions for increasing experiment quality + name: fig-blackwell-quality-grid +--- +θ_grid = [0.1, 0.4, 0.7, 1.0] prior2 = np.array([0.5, 0.5]) fig, axes = plt.subplots(2, 2, figsize=(11, 8)) axes = axes.flat -for ax, t in zip(axes, thetas_grid): - mu_t = make_experiment(t) - posts, probs = compute_posteriors(mu_t, prior2) +for ax, θ in zip(axes, θ_grid): + μ_θ = make_experiment(θ) + posts, probs = compute_posteriors(μ_θ, prior2) p_s1 = posts[:, 0] ax.vlines(p_s1, 0, probs, linewidth=8, color="steelblue", alpha=0.7) - ax.axvline(prior2[0], color="tomato", linestyle="--", linewidth=1.5, + ax.axvline(prior2[0], color="tomato", linestyle="--", linewidth=2, label=f"prior = {prior2[0]:.2f}") - I_H = degroot_value(mu_t, prior2, entropy) - I_G = degroot_value(mu_t, prior2, gini_impurity) - ax.set_title(fr"$\theta = {t}$ | $I_H = {I_H:.3f}$ | $I_G = {I_G:.3f}$", - fontsize=11) + I_H = degroot_value(μ_θ, prior2, entropy) + 
I_G = degroot_value(μ_θ, prior2, gini_impurity) ax.set_xlim(0, 1) - ax.set_xlabel(r"Posterior $p(s_1 \mid x)$", fontsize=11) - ax.set_ylabel("Probability mass", fontsize=11) + ax.set_xlabel(r"posterior $p(s_1 \mid x)$", fontsize=11) + ax.set_ylabel("probability mass", fontsize=11) + ax.text(0.03, 0.94, + f"θ = {θ}\n" f"I_H = {I_H:.3f}\n" f"I_G = {I_G:.3f}", + transform=ax.transAxes, va="top") ax.legend(fontsize=10) -fig.suptitle("Distribution of posteriors for experiments of increasing quality\n" - r"$\theta = 0$: uninformative; $\theta = 1$: perfect", - fontsize=12) plt.tight_layout() plt.show() ``` -As $\theta$ rises from 0 (unifomative) to 1 (perfect), posteriors migrate toward the -vertices $\{0, 1\}$, the value of information rises under every $U$, and the -distributions form a chain under the SOSD order. +As $\theta$ rises from 0 to 1, posterior beliefs move toward the vertices $\{0, 1\}$. ---- +At the same time, the value of information rises under every concave uncertainty function. -## Application 1 — Product Quality Information (Kihlstrom 1974) +## Application 1: product quality information -{cite}`kihlstrom1974a` applies Blackwell's theorem to consumer demand for information -about product quality. +{cite:t}`kihlstrom1974a` applies Blackwell's theorem to consumer demand for information about product quality. - The unknown state $\tilde{s}$ is a product parameter $\theta$. - A consumer can purchase $\lambda$ units of information at cost $c(\lambda)$. - As $\lambda$ rises, the experiment becomes more informative in the Blackwell sense. -The Blackwell ordering certifies that "more information is always better" for every -expected-utility maximiser when information is free. +The Blackwell order says that, absent costs, more information is always better for every expected-utility maximizer. + +Optimal information demand equates the *marginal value of the standard experiment* to its *marginal cost*. 
-The consumer's demand for information equates the *marginal value of the standard -experiment* to its *marginal cost*. +In the example below, we assume a linear cost $c \cdot \lambda$ and a simple family of experiments $\mu(\theta)$ as above with $c = 0.4$ ```{code-cell} ipython3 -def consumer_value(theta, prior2, U=entropy, cost_per_unit=0.5): - """ - Value of purchasing experiment quality theta. - Returns gross value I(theta) and net value I(theta) - cost. - """ - mu_t = make_experiment(theta) - gross = degroot_value(mu_t, prior2, U) - net = gross - cost_per_unit * theta +--- +mystnb: + figure: + caption: Information demand in a simple quality example + name: fig-blackwell-information-demand +--- +def consumer_value(θ, prior2, U=entropy, cost_per_unit=0.5): + """Value of purchasing experiment quality θ.""" + μ_t = make_experiment(θ) + gross = degroot_value(μ_t, prior2, U) + net = gross - cost_per_unit * θ return gross, net -thetas_fine = np.linspace(0, 1, 200) +θ_fine = np.linspace(0, 1, 200) gross_vals = [] net_vals = [] marginal_vals = [] -for t in thetas_fine: - g, n = consumer_value(t, prior2, entropy, cost_per_unit=0.4) +for θ in θ_fine: + g, n = consumer_value(θ, prior2, entropy, cost_per_unit=0.4) gross_vals.append(g) net_vals.append(n) -# Marginal value (numerical derivative) -marginal_vals = np.gradient(gross_vals, thetas_fine) +marginal_vals = np.gradient(gross_vals, θ_fine) fig, axes = plt.subplots(1, 2, figsize=(12, 4)) ax = axes[0] -ax.plot(thetas_fine, gross_vals, label="Gross value $I(\\theta)$", +ax.plot(θ_fine, gross_vals, label="Gross value I(θ)", color="steelblue", linewidth=2) -ax.plot(thetas_fine, [0.4 * t for t in thetas_fine], - label="Cost $c \\cdot \\theta$", color="tomato", +ax.plot(θ_fine, [0.4 * t for t in θ_fine], + label="Cost c · θ", color="tomato", linestyle="--", linewidth=2) -ax.plot(thetas_fine, net_vals, label="Net value", color="green", linewidth=2) -ax.set_xlabel(r"Experiment quality $\theta$", fontsize=11) 
-ax.set_ylabel("Value (Shannon entropy units)", fontsize=11) -ax.set_title("Gross value, cost, and net value of information", fontsize=11) +ax.plot(θ_fine, net_vals, label="Net value", color="green", linewidth=2) +ax.set_xlabel("experiment quality θ", fontsize=11) +ax.set_ylabel("value (Shannon entropy units)", fontsize=11) ax.legend(fontsize=10) ax2 = axes[1] -ax2.plot(thetas_fine, marginal_vals, label="Marginal value $I'(\\theta)$", +ax2.plot(θ_fine, marginal_vals, label="Marginal value I'(θ)", color="steelblue", linewidth=2) ax2.axhline(0.4, color="tomato", linestyle="--", linewidth=2, label="Marginal cost $c = 0.4$") opt_idx = np.argmin(np.abs(np.array(marginal_vals) - 0.4)) -ax2.axvline(thetas_fine[opt_idx], color="green", linestyle=":", - label=fr"Optimal $\theta^* \approx {thetas_fine[opt_idx]:.2f}$") -ax2.set_xlabel(r"Experiment quality $\theta$", fontsize=11) -ax2.set_ylabel("Marginal value / Marginal cost", fontsize=11) -ax2.set_title("Optimal demand for information:\n" - "MV = MC at optimal $\\theta^*$", fontsize=11) +ax2.axvline(θ_fine[opt_idx], color="green", linestyle=":", + linewidth=2, + label=f"Optimal θ* ≈ {θ_fine[opt_idx]:.2f}") +ax2.set_xlabel("experiment quality θ", fontsize=11) +ax2.set_ylabel("marginal value / marginal cost", fontsize=11) ax2.legend(fontsize=10) plt.tight_layout() plt.show() ``` -The optimal demand for information $\theta^*$ occurs where marginal value equals -marginal cost. Both axes shift as the cost $c$ changes, demonstrating comparative -statics. +The optimal demand for information $\theta^*$ occurs where marginal value equals marginal cost. ---- +Comparative statics follow from shifts in either curve. + +## Application 2: sequential experimental design -## Application 2 — Sequential Experimental Design (DeGroot 1962) +{cite:t}`degroot1962` applies the uncertainty-function framework to *sequential experimental design*. -{cite}`degroot1962` applies the uncertainty-function framework to **sequential -experimental design**. 
+Each period a statistician observes one draw and updates the posterior. -Each period a statistician observes one draw and updates their posterior. The -question is which sequence of experiments minimises cumulative expected uncertainty. +The question is which sequence of experiments minimizes cumulative expected uncertainty. -The Blackwell theorem implies that if one experiment is more informative than another -at every stage, the optimal sequential strategy simply uses the better experiment at -every period. +If one experiment is more informative than another at every stage, then the Blackwell order favors using the better experiment at every date. -We simulate sequential belief updating for experiments of different quality. +We now simulate sequential belief updating for experiments of different quality. ```{code-cell} ipython3 -def sequential_update(mu_matrix, prior, T=20, seed=0): - """ - Simulate T sequential belief updates under experiment mu. - Returns the path of posterior beliefs (T+1, N). 
- """ +--- +mystnb: + figure: + caption: Sequential posterior paths for different experiment qualities + name: fig-blackwell-sequential-paths +--- +def sequential_update(μ_matrix, prior, T=20, seed=0): + """Simulate T sequential belief updates under experiment μ.""" rng = np.random.default_rng(seed) - N, M = mu_matrix.shape + N, M = μ_matrix.shape beliefs = np.zeros((T + 1, N)) beliefs[0] = prior.copy() @@ -1081,57 +1049,56 @@ def sequential_update(mu_matrix, prior, T=20, seed=0): for t in range(T): p = beliefs[t] - # Draw a signal from the true state - signal = rng.choice(M, p=mu_matrix[true_state]) - # Bayes update - unnorm = mu_matrix[:, signal] * p + signal = rng.choice(M, p=μ_matrix[true_state]) + unnorm = μ_matrix[:, signal] * p beliefs[t + 1] = unnorm / unnorm.sum() return beliefs, true_state -def plot_sequential_beliefs(thetas_compare, prior2, T=25): - fig, axes = plt.subplots(1, len(thetas_compare), figsize=(14, 4), sharey=True) +def plot_sequential_beliefs(θs_compare, prior2, T=25): + fig, axes = plt.subplots(1, len(θs_compare), figsize=(14, 4), sharey=True) - for ax, theta in zip(axes, thetas_compare): - mu_t = make_experiment(theta, N=2) + for ax, θ in zip(axes, θs_compare): + μ_t = make_experiment(θ, N=2) for seed in range(15): - beliefs, ts = sequential_update(mu_t, prior2, T=T, seed=seed) + beliefs, ts = sequential_update(μ_t, prior2, T=T, seed=seed) c = "steelblue" if ts == 0 else "darkorange" - ax.plot(beliefs[:, 0], alpha=0.4, color=c, linewidth=1.2) - ax.axhline(prior2[0], linestyle="--", color="gray", linewidth=1, + ax.plot(beliefs[:, 0], alpha=0.35, color=c, linewidth=2) + ax.axhline(prior2[0], linestyle="--", color="gray", linewidth=2, label="prior") - ax.axhline(1.0, linestyle=":", color="steelblue", linewidth=0.8) - ax.axhline(0.0, linestyle=":", color="darkorange", linewidth=0.8) - ax.set_title(fr"$\theta = {theta}$", fontsize=12) - ax.set_xlabel("Period $t$", fontsize=11) - if theta == thetas_compare[0]: - ax.set_ylabel(r"Posterior $p(s_1 
\mid x^t)$", fontsize=11) + ax.axhline(1.0, linestyle=":", color="steelblue", linewidth=2) + ax.axhline(0.0, linestyle=":", color="darkorange", linewidth=2) + ax.set_xlabel(r"period $t$", fontsize=11) + if θ == θs_compare[0]: + ax.set_ylabel(r"posterior $p(s_1 \mid x^t)$", fontsize=11) ax.set_ylim(-0.05, 1.05) + ax.text(0.03, 0.94, f"θ = {θ}", transform=ax.transAxes, va="top") ax.legend(fontsize=9) - fig.suptitle("Sequential belief paths under experiments of increasing quality\n" - "Blue = true state $s_1$; Orange = true state $s_2$", - fontsize=11) plt.tight_layout() plt.show() plot_sequential_beliefs([0.2, 0.5, 0.9], prior2, T=30) ``` -More informative experiments (larger $\theta$) cause beliefs to converge faster to the -truth. Under the uniform prior and perfectly symmetric experiments, belief paths are -martingales — the law of iterated expectations for beliefs. +More informative experiments make beliefs converge faster to the truth. + +Under the correct prior, the posterior process is a martingale. ```{code-cell} ipython3 -# Verify the martingale property: E[p_{t+1} | x^t] = p_t -def check_martingale(mu_matrix, prior, T=15, n_paths=2000, seed=0): +--- +mystnb: + figure: + caption: Unconditional implication of the posterior martingale property + name: fig-blackwell-martingale-mean +--- +def check_martingale_mean(μ_matrix, prior, T=15, n_paths=2000, seed=0): """ - Simulate many belief paths and check E[p_{t+1}] ≈ E[p_t]. - Under the true prior, belief sequences are martingales. + Simulate many belief paths and check E[p_t] = p_0. 
""" rng = np.random.default_rng(seed) - N, M = mu_matrix.shape + N, M = μ_matrix.shape all_paths = np.zeros((n_paths, T + 1, N)) for k in range(n_paths): @@ -1139,64 +1106,52 @@ def check_martingale(mu_matrix, prior, T=15, n_paths=2000, seed=0): p = prior.copy() all_paths[k, 0] = p for t in range(T): - signal = rng.choice(M, p=mu_matrix[true_state]) - unnorm = mu_matrix[:, signal] * p + signal = rng.choice(M, p=μ_matrix[true_state]) + unnorm = μ_matrix[:, signal] * p p = unnorm / unnorm.sum() all_paths[k, t + 1] = p - mean_path = all_paths[:, :, 0].mean(axis=0) # E[p(s1)] over paths + mean_path = all_paths[:, :, 0].mean(axis=0) fig, ax = plt.subplots(figsize=(8, 4)) ax.plot(mean_path, color="steelblue", linewidth=2, label=r"$\bar p_t(s_1)$ (mean over paths)") - ax.axhline(prior[0], linestyle="--", color="tomato", linewidth=1.5, + ax.axhline(prior[0], linestyle="--", color="tomato", linewidth=2, label=fr"Prior $p_0 = {prior[0]:.2f}$") - ax.set_xlabel("Period $t$", fontsize=12) + ax.set_xlabel(r"period $t$", fontsize=12) ax.set_ylabel(r"$E[p_t(s_1)]$", fontsize=12) - ax.set_title(r"Belief martingale: $E[p_t(s_1)]$ stays at the prior" - "\n(law of iterated expectations for beliefs)", fontsize=11) ax.legend(fontsize=11) ax.set_ylim(0, 1) plt.tight_layout() plt.show() print(f"Prior = {prior[0]:.4f}") - print(f"Mean belief (averaged over {n_paths} paths and time): " - f"{mean_path.mean():.4f}") + print(f"Average mean belief across dates: {mean_path.mean():.4f}") -check_martingale(mu_info, prior, T=20, n_paths=5000) +check_martingale_mean(μ_info, prior, T=20, n_paths=5000) ``` -The mean posterior tracks the prior throughout — reflecting the law of iterated -expectations applied to beliefs. +The simulated cross-sectional mean stays close to the prior at every date. ---- +This is the unconditional implication of the posterior martingale property. 
## Summary -Blackwell's theorem identifies a **partial order** on statistical experiments with -three equivalent characterisations: +Blackwell's theorem identifies a *partial order* on statistical experiments with +three equivalent characterizations: | Criterion | Condition | |-----------|-----------| -| **Economic** | Every decision maker prefers $\mu$ to $\nu$: $B(\mu,A) \supseteq B(\nu,A)$ | -| **Sufficiency** | $\nu$ is a garbling of $\mu$: $\nu = \mu Q$ for some Markov $Q$ | -| **Uncertainty** | $\mu$ reduces every concave $U$ more: $E[U(p^\mu)] \leq E[U(p^\nu)]$ | +| Economic | Every decision maker weakly prefers $\mu$ to $\nu$: $B(\mu, A, u) \supseteq B(\nu, A, u)$ | +| Sufficiency | $\nu$ is a garbling of $\mu$: $\nu = \mu Q$ for some Markov $Q$ | +| Uncertainty | $\mu$ reduces expected uncertainty more for every prior $p$ and every concave $U$ | -Kihlstrom's Bayesian exposition clarifies the theorem's geometry by placing the -**posterior distribution** at the centre: +Kihlstrom's Bayesian exposition places the *posterior distribution* at the center. -- A more informative experiment creates a **more dispersed** distribution of - posteriors — a mean-preserving spread of the posterior distribution induced by - the less informative experiment. -- This links the Blackwell order to **second-order stochastic dominance** on the - probability simplex $P$. -- The uncertainty-function criterion is then transparent: because $U$ is concave, - more dispersed posteriors (mean-preserving spread) correspond to higher expected - $U$ — equivalently, lower expected uncertainty. +A more informative experiment generates a more dispersed posterior distribution with the same mean prior. -DeGroot's contribution is to extend the criterion from specific utility functions -to the *entire class* of concave uncertainty functions, confirming the full -generality of Blackwell's result. 
+The right probabilistic language is convex order, and the Blackwell ordering on experiments is isomorphic to the second-order stochastic dominance (SOSD) ordering on distributions of posteriors. +In the two-state case this reduces to the familiar mean-preserving-spread comparison on $[0, 1]$, which can be verified with the integrated-CDF test. +DeGroot's contribution is to extend the comparison from particular utility functions to the full class of concave uncertainty functions. From 04b05aed8c53f01338d67279815cbdc54bcc6996 Mon Sep 17 00:00:00 2001 From: thomassargent30 Date: Tue, 31 Mar 2026 11:39:10 -0400 Subject: [PATCH 03/20] Tom's March 31 edits of two lectures --- environment.yml | 4 +- lectures/_static/quant-econ.bib | 68 ++ lectures/_toc.yml | 3 +- lectures/blackwell_kihlstrom.md | 165 ++++- lectures/merging_of_opinions.md | 1224 +++++++++++++++++++++++++++++++ 5 files changed, 1458 insertions(+), 6 deletions(-) create mode 100644 lectures/merging_of_opinions.md diff --git a/environment.yml b/environment.yml index dfd3a27ba..51df47972 100644 --- a/environment.yml +++ b/environment.yml @@ -2,8 +2,8 @@ name: quantecon channels: - default dependencies: - - python=3.13 - - anaconda=2025.12 + - python=3.12 + - anaconda=2024.10 - pip - pip: - jupyter-book>=1.0.4post1,<2.0 diff --git a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index b62cf6ecb..e495748c8 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -3,6 +3,74 @@ Note: Extended Information (like abstracts, doi, url's etc.) 
can be found in quant-econ-extendedinfo.bib file in _static/ ### +@article{blackwell1962, + author = {Blackwell, David and Dubins, Lester E.}, + title = {Merging of Opinions with Increasing Information}, + journal = {Annals of Mathematical Statistics}, + year = {1962}, + volume = {33}, + number = {3}, + pages = {882--886}, +} + +@article{aumann1976, + author = {Aumann, Robert J.}, + title = {Agreeing to Disagree}, + journal = {Annals of Statistics}, + year = {1976}, + volume = {4}, + number = {6}, + pages = {1236--1239}, +} + +@book{doob1953, + author = {Doob, Joseph L.}, + title = {Stochastic Processes}, + publisher = {Wiley}, + address = {New York}, + year = {1953}, +} + +@article{kakutani1948, + author = {Kakutani, Shizuo}, + title = {On Equivalence of Infinite Product Measures}, + journal = {Annals of Mathematics}, + year = {1948}, + volume = {49}, + number = {1}, + pages = {214--224}, +} + +@article{girsanov1960, + author = {Girsanov, Igor V.}, + title = {On Transforming a Certain Class of Stochastic Processes + by Absolutely Continuous Substitution of Measures}, + journal = {Theory of Probability and Its Applications}, + year = {1960}, + volume = {5}, + number = {3}, + pages = {285--301}, +} + +@article{novikov1972, + author = {Novikov, Alexander A.}, + title = {On an Identity for Stochastic Integrals}, + journal = {Theory of Probability and Its Applications}, + year = {1972}, + volume = {17}, + number = {4}, + pages = {717--720}, +} + +@article{diaconis1986, + author = {Diaconis, Persi and Freedman, David}, + title = {On the Consistency of {Bayes} Estimates}, + journal = {Annals of Statistics}, + year = {1986}, + volume = {14}, + number = {1}, + pages = {1--26}, +} @inproceedings{blackwell1951, diff --git a/lectures/_toc.yml b/lectures/_toc.yml index 247a89b34..10ecb41c8 100644 --- a/lectures/_toc.yml +++ b/lectures/_toc.yml @@ -36,15 +36,16 @@ parts: - file: divergence_measures - file: likelihood_ratio_process - file: likelihood_ratio_process_2 - - file: 
blackwell_kihlstrom - file: likelihood_var - file: imp_sample - file: wald_friedman - file: wald_friedman_2 - file: exchangeable - file: likelihood_bayes + - file: blackwell_kihlstrom - file: mix_model - file: navy_captain + - file: merging_of_opinions - caption: Linear Programming numbered: true chapters: diff --git a/lectures/blackwell_kihlstrom.md b/lectures/blackwell_kihlstrom.md index 818882885..fca0943d5 100644 --- a/lectures/blackwell_kihlstrom.md +++ b/lectures/blackwell_kihlstrom.md @@ -28,9 +28,29 @@ kernelspec: ## Overview -This lecture studies *Blackwell's theorem* {cite}`blackwell1951,blackwell1953` on ranking statistical experiments, following the Bayesian exposition in {cite}`kihlstrom1984`. -Suppose that two signals, $\tilde{x}_\mu$ and $\tilde{x}_\nu$, are both informative about an unknown state $\tilde{s}$. + +This lecture studies *Blackwell's theorem* {cite}`blackwell1951,blackwell1953` on ranking statistical experiments. + +Our presentation brings in findings from a Bayesian interpretation of Blackwell's theorem by {cite}`kihlstrom1984`. + +Blackwell and Kihlstrom study questions closely related to those encountered in this QuantEcon lecture {doc}`likelihood_bayes`. + +To appreciate the connection involved, it is helpful up front to appreciate how Blackwell's notion of +an **experiment** is related to the concept of a ''probability distribution'' or ''parameterized statistical model'' appearing in {doc}`likelihood_bayes` + +Blackwell studies a situation in which a decision maker wants to know a state $s$ living in a space $S$. + +For Blackwell, an **experiment** is a **conditional probability model** $\{\mu(\cdot \mid s) : s \in S\}$, i.e., a family of distributions indexed by the unknown state. + +We are free to interpret "state" as "parameter". 
+ +In a two-state case $S = \{s_1, s_2\}$, the two conditional densities $f(\cdot) = \mu(\cdot \mid s_1)$ and $g(\cdot) = \mu(\cdot \mid s_2)$ are the ones used repeatedly in our studies of classical hypothesis testing and Bayesian inference in this suite of QuantEcon lectures. + +Blackwell's question — *which experiment is more informative?* — is about which conditional probability model allows a Bayesian with a prior over $\{s_1, s_2\}$ to learn more about which model governs the world. + + +Thus, suppose that two signals, $\tilde{x}_\mu$ and $\tilde{x}_\nu$, are both informative about an unknown state $\tilde{s}$. Blackwell's question is which signal is more informative. @@ -41,7 +61,7 @@ This economic criterion is equivalent to two statistical criteria: - *Sufficiency* (Blackwell): $\tilde{x}_\nu$ can be generated from $\tilde{x}_\mu$ by an additional randomization. - *Uncertainty reduction* (DeGroot {cite}`degroot1962`): $\tilde{x}_\mu$ lowers expected uncertainty at least as much as $\tilde{x}_\nu$ for every concave uncertainty function. -Kihlstrom's reformulation places the *posterior distribution* at the center. +Kihlstrom's reformulation focuses on the *posterior distribution*. More informative experiments generate posterior distributions that are more dispersed in convex order. @@ -86,6 +106,7 @@ $$ Each row $i$ gives the distribution of signals when the true state is $s_i$. + ```{code-cell} ipython3 μ = np.array([[0.6, 0.3, 0.1], [0.1, 0.3, 0.6]]) @@ -1155,3 +1176,141 @@ The right probabilistic language is convex order, and the Blackwell ordering on In the two-state case this reduces to the familiar mean-preserving-spread comparison on $[0, 1]$, which can be verified with the integrated-CDF test. DeGroot's contribution is to extend the comparison from particular utility functions to the full class of concave uncertainty functions. 
+
+---
+
+## Relation to Bayesian likelihood-ratio learning
+
+The lecture {doc}`likelihood_bayes` studies Bayesian learning in a setting that is a special, dynamic instance of everything developed here.
+
+This section transports concepts back and forth between the two lectures.
+
+### The state space is the same
+
+In {doc}`likelihood_bayes` the unknown "state of the world" is which density nature chose permanently: nature drew the data either from $f$ or from $g$, but the observer does not know which one.
+
+This is a two-element finite state space
+
+$$
+S = \{s_1, s_2\} \qquad \text{with } s_1 \leftrightarrow f,\quad s_2 \leftrightarrow g.
+$$
+
+The Bayesian prior $\pi_0 \in [0,1]$ on $s_1 = f$ plays exactly the role of the prior $p \in P$ on the probability simplex in the present lecture.
+
+### A single draw is an experiment
+
+A single observation $w_t$ constitutes a Blackwell experiment with signal space $X$ and Markov kernel
+
+$$
+\mu = \begin{pmatrix} f(\cdot) \\ g(\cdot) \end{pmatrix},
+$$
+
+where row $i$ is the conditional density of the signal given state $s_i$:
+$\mu(\cdot \mid s_1) = f(\cdot)$ and $\mu(\cdot \mid s_2) = g(\cdot)$.
+
+This is the continuous-signal analogue of the $N \times M$ Markov matrix studied above (with $N = 2$ states and a continuum of signals instead of $M$ discrete ones).
+
+### $t$ IID draws form a richer experiment
+
+Observing the history $w^t = (w_1, \ldots, w_t)$ is a strictly more informative Blackwell experiment than observing any sub-history $w^s$ for $s < t$, because the conditional joint densities for $w^t$ are
+
+$$
+\mu_t(\cdot \mid s_1) = f(w_1) f(w_2) \cdots f(w_t),
+\qquad
+\mu_t(\cdot \mid s_2) = g(w_1) g(w_2) \cdots g(w_t).
+$$
+
+The experiment $\mu_t$ Blackwell-dominates $\mu_s$ for any $t > s$: you can always garble $w^t$ down to $w^s$ by discarding the last $t - s$ draws, which is an explicit stochastic transformation $Q$ satisfying $\mu_s = \mu_t Q$.
+
+The reverse is impossible — you cannot reconstruct information from fewer draws.
+
+This is why more data is always weakly better for every expected-utility maximizer (the economic criterion of Blackwell's theorem).
+
+### The likelihood ratio process is the sufficient statistic of the experiment
+
+The key formula in {doc}`likelihood_bayes` is
+
+$$
+\pi_{t+1} = \frac{\pi_0 \, L(w^{t+1})}{\pi_0 \, L(w^{t+1}) + 1 - \pi_0},
+\qquad
+L(w^t) = \prod_{i=1}^t \frac{f(w_i)}{g(w_i)}.
+$$
+
+Because $\pi_{t+1}$ depends on $w^{t+1}$ **only through** $L(w^{t+1})$, the likelihood ratio process is a **sufficient statistic** for the experiment $\mu_t$.
+
+In Blackwell's language, the experiment "report $L(w^t)$" is informationally equivalent to "report $w^t$": passing $w^t$ through the deterministic map $w^t \mapsto L(w^t)$ is a (degenerate) stochastic transformation that discards nothing relevant to discriminating $f$ from $g$.
+
+### The posterior lives on the 1-simplex and is Kihlstrom's standard experiment
+
+With $N = 2$ states the probability simplex $P$ collapses to the unit interval $[0,1]$.
+Kihlstrom's standard experiment records only the posterior
+
+$$
+\pi_t = \Pr(s = f \mid w^t),
+$$
+
+which is the sufficient statistic that the Bayesian tracks throughout.
+
+The **distribution** of $\pi_t$ over all possible histories $w^t$ is Kihlstrom's $\hat{\mu}^c$ — the distribution of posteriors induced by the experiment $\mu_t$ starting from prior $\pi_0 = c$.
+
+### The martingale property is mean preservation
+
+{doc}`likelihood_bayes` proves that $\{\pi_t\}$ is a **martingale**:
+
+$$
+E[\pi_t \mid \pi_{t-1}] = \pi_{t-1},
+$$
+
+and in particular $E[\pi_t] = \pi_0$ for all $t$.
+
+This is exactly the **mean-preservation** condition that sits at the center of Kihlstrom's reformulation: the distribution of posteriors $\hat{\mu}^c$ must satisfy $\int_P p \, \hat{\mu}^c(dp) = c$.
+ +Mean preservation is not a special feature of this two-state example; it is an exact consequence of Bayes' law for **any** experiment. + +### Blackwell's theorem explains why more data always helps + +Kihlstrom's reformulation states: + +> $\mu_t \geq \mu_s$ in Blackwell's sense if and only if $\hat{\mu}_t^c$ is a **mean-preserving spread** of $\hat{\mu}_s^c$, i.e., posteriors under $\mu_t$ are more dispersed than under $\mu_s$. + +In the {doc}`likelihood_bayes` setting this means the distribution of $\pi_t$ is a mean-preserving spread of the distribution of $\pi_s$ for $t > s$: more data pushes posteriors further from the prior toward either $0$ or $1$. + +The almost-sure convergence $\pi_t \to 0$ or $1$ is the limit of this spreading process — perfect information resolves all uncertainty, collapsing the distribution to a degenerate point mass at a vertex of the simplex. + +### DeGroot uncertainty functions and mutual information + +The Shannon entropy of the two-state posterior is + +$$ +U_H(\pi) = -\pi \log \pi - (1-\pi)\log(1-\pi). +$$ + +DeGroot's value of information for the experiment that generates $t$ draws is + +$$ +I(\mu_t;\, U_H) = U_H(\pi_0) - E[U_H(\pi_t)], +$$ + +which equals the **mutual information** between the history $w^t$ and the unknown state. + +Because $\mu_t$ Blackwell-dominates $\mu_s$ for $t > s$, Blackwell's theorem guarantees $I(\mu_t; U) \geq I(\mu_s; U)$ for **every** concave uncertainty function $U$ — more draws reduce expected uncertainty under every such measure, not just Shannon entropy. + +### Summary table + +The table below collects the complete translation between concepts in the two lectures. 
+ +| Concept in {doc}`likelihood_bayes` | Concept in this lecture | +|---|---| +| States $\{f, g\}$ | State space $S = \{s_1, s_2\}$ | +| Densities $f(\cdot)$, $g(\cdot)$ | Rows of experiment matrix $\mu$ | +| Single draw $w_t$ | Blackwell experiment with continuous signal space | +| History $w^t$ of $t$ IID draws | Richer experiment $\mu_t$ Blackwell-dominating $\mu_s$, $s < t$ | +| Likelihood ratio $L(w^t)$ | Sufficient statistic / standard experiment | +| Prior $\pi_0$ | Prior $p \in P$ on the 1-simplex $[0,1]$ | +| Posterior $\pi_t$ | Posterior random variable on $P = [0,1]$ | +| Distribution of $\pi_t$ across histories | $\hat{\mu}^c$ (Kihlstrom's posterior distribution) | +| Martingale property $E[\pi_t] = \pi_0$ | Mean preservation of $\hat{\mu}^c$ | +| $\pi_t \to 0$ or $1$ almost surely | Posteriors spread to vertices (MPS in the limit) | +| Mutual information $I(\mu_t; U_H)$ | DeGroot value of information | +| More draws $\Rightarrow$ better for all decision makers | Blackwell ordering $\mu_t \geq \mu_s$ | +| Garbling (discard last $t - s$ draws) | Stochastic transformation $Q$ with $\mu_s = \mu_t Q$ | diff --git a/lectures/merging_of_opinions.md b/lectures/merging_of_opinions.md new file mode 100644 index 000000000..88cd5952a --- /dev/null +++ b/lectures/merging_of_opinions.md @@ -0,0 +1,1224 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.16.4 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(merging_of_opinions)= +```{raw} jupyter + +``` + +# Merging of Opinions: The Blackwell–Dubins Theorem + +```{contents} Contents +:depth: 2 +``` + +## Overview + +This lecture studies the **merging-of-opinions theorem** of {cite:t}`blackwell1962`. 
+ +The theorem answers a central question in Bayesian epistemology and statistical decision theory: + +> If two agents hold different prior beliefs about a stochastic process but observe the same stream of data indefinitely, will their probability assessments eventually converge? + +The answer is a striking affirmative. Provided the two probability measures are +**mutually absolutely continuous** — each assigns positive probability to every +event the other considers possible — their conditional forecasts about all future +events merge to zero total-variation distance, *almost surely*. + +This result is connected to several other important ideas: + +- **Bayesian consistency**: a Bayesian agent's posterior predictions converge to + the truth whenever the prior assigns positive probability to the true model + ({doc}`likelihood_bayes`). +- **The rational-expectations hypothesis**: agents who disagree about the initial + model but share a common history will eventually agree on all conditional + forecasts ({cite:t}`aumann1976`). +- **Ergodic theory**: merging plays the role of ergodicity for non-Markovian + processes, forcing long-run agreement without requiring a common stationary + distribution. +- **Kakutani's dichotomy**: for product measures, mutual absolute continuity is + equivalent to a simple condition on Hellinger affinities, giving a clean + operational criterion for when merging is guaranteed. + +We develop the theory in discrete time, where the argument is sharpest, and +sketch the continuous-time extension. Throughout we use the +**Beta–Bernoulli conjugate model** as the running numerical example: two +agents observe the same stream of coin flips but start with very different +priors over the coin's bias. + +Let us start with some imports. 
+ +```{code-cell} ipython3 +import numpy as np +import matplotlib.pyplot as plt +from scipy.stats import beta as beta_dist +from scipy.special import betaln + +np.random.seed(42) +``` + +--- + +## Probability Measures on Sequence Spaces + +### The sequence space and its filtration + +Let $(S, \mathscr{S})$ be a measurable space (the *signal space*). +Set $\Omega = S^{\mathbb{N}}$, the set of all infinite sequences +$\omega = (x_1, x_2, \ldots)$ with $x_n \in S$, equipped with the product +$\sigma$-algebra $\mathscr{F} = \mathscr{S}^{\otimes \mathbb{N}}$. + +For each $n \geq 1$, define the **finite-horizon** $\sigma$-algebra + +$$ +\mathscr{F}_n = \sigma(x_1, \ldots, x_n), +$$ + +so $\mathscr{F}_1 \subseteq \mathscr{F}_2 \subseteq \cdots \subseteq \mathscr{F}$. +The collection $\{\mathscr{F}_n\}_{n \geq 1}$ is the **natural filtration** +generated by the observation process; $\mathscr{F}_n$ encodes everything +that can be learned from the first $n$ data points. + +Let $P$ and $Q$ denote two probability measures on $(\Omega, \mathscr{F})$. +Write $P_n = P|_{\mathscr{F}_n}$ and $Q_n = Q|_{\mathscr{F}_n}$ for their +restrictions to the history up to time $n$. + +### Absolute continuity + +```{admonition} Definition +:class: tip +**Absolute continuity.** +$P$ is *absolutely continuous* with respect to $Q$, written $P \ll Q$, if +$Q(A) = 0$ implies $P(A) = 0$ for every $A \in \mathscr{F}$. +They are *mutually absolutely continuous* (or *equivalent*), written $P \sim Q$, +if both $P \ll Q$ and $Q \ll P$. + +$P$ is *locally absolutely continuous* with respect to $Q$ if $P_n \ll Q_n$ +for every $n \geq 1$. +Global absolute continuity $P \ll Q$ implies local absolute continuity, but +not conversely. +``` + +Mutual absolute continuity has a natural interpretation: $P \sim Q$ means the two +agents agree on which individual events are *possible*. 
They may disagree about +how *likely* those events are, but neither agent considers an event impossible +that the other considers possible. + +### Total variation distance + +```{admonition} Definition +:class: tip +**Total variation distance.** +For two probability measures $\mu$ and $\nu$ on $(E, \mathscr{E})$, + +$$ +\|\mu - \nu\|_{\mathrm{TV}} += \sup_{A \in \mathscr{E}} |\mu(A) - \nu(A)| += \frac{1}{2} \int_E \left|\frac{d\mu}{d\lambda} - \frac{d\nu}{d\lambda}\right| d\lambda, +$$ + +where $\lambda$ is any dominating measure. Equivalently, $\|\mu - \nu\|_{\mathrm{TV}} \in [0,1]$, +with 0 meaning $\mu = \nu$ and 1 meaning $\mu \perp \nu$ (mutual singularity). +``` + +When $\mu \ll \nu$ with $f = d\mu/d\nu$, + +$$ +\|\mu - \nu\|_{\mathrm{TV}} = \mathbb{E}_\nu[(f-1)^+] = 1 - \mathbb{E}_\nu[\min(f,1)]. +$$ + +The total variation distance is the **strongest** notion of convergence of +probability measures. If two probability measures are close in total variation, +they are close in every possible statistical sense: their probabilities of any +event differ by at most $\|\mu - \nu\|_{\mathrm{TV}}$. + +### The merging question + +The Blackwell–Dubins theorem is about the conditional distributions of the +**future** given the **past**. At time $n$, after observing $(x_1,\ldots,x_n)$, +each agent forms a conditional distribution over all future events: + +$$ +P(\,\cdot\,|\,\mathscr{F}_n)(\omega), \qquad +Q(\,\cdot\,|\,\mathscr{F}_n)(\omega). +$$ + +These are probability measures on the whole future path, not just the next +observation. The merging question asks whether + +$$ +d_n \;:=\; \bigl\|P(\,\cdot\,|\,\mathscr{F}_n) - Q(\,\cdot\,|\,\mathscr{F}_n)\bigr\|_{\mathrm{TV}} +\;\longrightarrow\; 0 +$$ + +almost surely as $n \to \infty$. + +--- + +## The Likelihood-Ratio Martingale + +The main mathematical tool is the **Radon–Nikodym derivative process**. 
+ +### The likelihood ratio + +Since $P \ll Q$ implies $P_n \ll Q_n$ for every $n$, the Radon–Nikodym +theorem guarantees the existence of the **likelihood ratio** + +$$ +Z_n = \frac{dP_n}{dQ_n}, \qquad Z_n \geq 0 \;\; Q\text{-a.s.}, +\qquad \mathbb{E}_Q[Z_n] = 1. +$$ + +The key structural property is that global absolute continuity $P \ll Q$ +implies the existence of an overall Radon–Nikodym derivative $Z = dP/dQ$ +on all of $(\Omega, \mathscr{F})$, and + +$$ +Z_n = \mathbb{E}_Q[Z \,|\, \mathscr{F}_n] \qquad Q\text{-a.s.} +$$ + +That is, $\{Z_n, \mathscr{F}_n\}_{n \geq 1}$ is a **non-negative, uniformly +integrable $Q$-martingale**. + +```{admonition} Lemma (Martingale convergence) +:class: note +The likelihood-ratio process $\{Z_n\}$ satisfies: + +1. $Z_n \to Z_\infty$ $Q$-almost surely as $n \to \infty$. +2. $Z_\infty = \mathbb{E}_Q[Z \,|\, \mathscr{F}_\infty]$ $Q$-a.s. +3. $Z_n \to Z_\infty$ in $L^1(Q)$: $\;\mathbb{E}_Q[|Z_n - Z_\infty|] \to 0$. + +*Proof sketch.* Non-negativity and the martingale property give boundedness +in $L^1(Q)$. Then almost-sure convergence follows from Doob's martingale +convergence theorem {cite:t}`doob1953`. Uniform integrability (which follows +from $Z \in L^1(Q)$ via the conditional Jensen inequality) upgrades this to +$L^1(Q)$ convergence. $\square$ +``` + +### Connecting conditional measures to the likelihood ratio + +The following identity is the key bridge between the likelihood ratio and the +conditional distributions. + +On the set $\{Z_n > 0\}$, the Radon–Nikodym derivative of +$P(\,\cdot\,|\,\mathscr{F}_n)$ with respect to $Q(\,\cdot\,|\,\mathscr{F}_n)$ +is + +$$ +\frac{d\,P(\,\cdot\,|\,\mathscr{F}_n)}{d\,Q(\,\cdot\,|\,\mathscr{F}_n)} += \frac{Z_\infty}{Z_n} +\qquad Q\text{-a.s.\ on } \{Z_n > 0\}. 
+$$ + +Applying the total-variation formula with $f = Z_\infty / Z_n$ then gives + +$$ +\boxed{ +d_n += \mathbb{E}_{Q(\cdot|\mathscr{F}_n)}\!\left[\left(\frac{Z_\infty}{Z_n} - 1\right)^{\!+}\right] += 1 - \mathbb{E}_{Q(\cdot|\mathscr{F}_n)}\!\left[\min\!\left(\frac{Z_\infty}{Z_n},\,1\right)\right]. +} +$$ + +Multiplying through by $Z_n$ and integrating with respect to $Q$: + +$$ +2\,\mathbb{E}_Q[d_n] \;\leq\; \mathbb{E}_Q[|Z_\infty - Z_n|], +$$ + +so the $L^1$ convergence of the martingale directly controls the rate at +which the total variation distance between the two agents' conditional +forecasts goes to zero. + +--- + +## The Blackwell–Dubins Theorem + +```{admonition} Theorem (Blackwell–Dubins, 1962) +:class: important +Let $P$ and $Q$ be probability measures on $(\Omega, \mathscr{F})$ with +$P \ll Q$. Define + +$$ +d_n = \bigl\|P(\,\cdot\,|\,\mathscr{F}_n) - Q(\,\cdot\,|\,\mathscr{F}_n)\bigr\|_{\mathrm{TV}}. +$$ + +Then $d_n \to 0$ almost surely under $Q$ (and hence also under $P$). +``` + +### Proof ingredients + +The proof has three steps, each transparent once the framework is in place. + +**Step 1 — Representation of $d_n$ via $Z_n$.** +As derived above, $d_n$ is expressed in terms of $Z_\infty / Z_n$ via the +conditional Bayes formula. This reduces the problem from a statement about +two different probability measures to a statement about a single martingale +under $Q$. + +**Step 2 — $\{d_n\}$ is a $Q$-supermartingale.** +Conditioning on more information cannot make the two measures easier to +distinguish; it can only make them harder. Formally, because +$P(\,\cdot\,|\,\mathscr{F}_n) = \mathbb{E}[P(\,\cdot\,|\,\mathscr{F}_{n+1})\,|\,\mathscr{F}_n]$ +and total variation is convex, + +$$ +\mathbb{E}_Q[d_{n+1}\,|\,\mathscr{F}_n] \leq d_n \qquad Q\text{-a.s.} +$$ + +So $\{d_n, \mathscr{F}_n\}$ is a non-negative $Q$-supermartingale taking +values in $[0,1]$. 
By Doob's supermartingale convergence theorem, $d_n \to d_\infty$ +$Q$-almost surely for some $[0,1]$-valued random variable $d_\infty$. + +**Step 3 — The almost-sure limit is zero.** +From Step 1 and the $L^1$ bound: + +$$ +\mathbb{E}_Q[d_n] \leq \tfrac{1}{2}\,\mathbb{E}_Q[|Z_\infty - Z_n|] \to 0. +$$ + +Hence $d_n \to 0$ in $L^1(Q)$, so $d_n \to 0$ in probability under $Q$. +But $d_n \to d_\infty$ $Q$-a.s. and $d_n \to 0$ in probability together +force $d_\infty = 0$ $Q$-a.s. Since $P \ll Q$, every $Q$-null set is +$P$-null, so $d_n \to 0$ $P$-a.s. as well. $\square$ + +```{admonition} Remark (One-sided vs. mutual absolute continuity) +:class: note +The theorem requires only $P \ll Q$, not $Q \ll P$. +Under one-sided absolute continuity, merging holds $Q$-a.s. (and hence +$P$-a.s.). If additionally $Q \ll P$ — that is, $P \sim Q$ — then merging +holds under *both* agents' measures: neither agent has a positive-probability +path on which the other agent's beliefs remain permanently different. +``` + +```{admonition} Remark (Tightness) +:class: note +The theorem is sharp. If $P \perp Q$ (mutual singularity), then there +exists a set $A$ with $P(A) = 1$ and $Q(A) = 0$. By Lévy's zero-one +law, $Q(A|\mathscr{F}_n) \to 0$ and $P(A|\mathscr{F}_n) \to 1$ +almost surely, so $d_n \to 1$ rather than zero. Absolute continuity is +not merely sufficient; the dichotomy between $P \ll Q$ and $P \perp Q$ +is qualitatively sharp. +``` + +--- + +## The Beta–Bernoulli Model + +Before turning to Python, we introduce the main example used throughout +the simulations. + +### Model + +Suppose the data stream $(x_1, x_2, \ldots)$ consists of IID Bernoulli +draws with unknown probability $p^* \in (0,1)$. Agent $i$ holds a +Beta prior: + +$$ +p \sim \mathrm{Beta}(\alpha_i, \beta_i), \qquad i = 1, 2. 
+$$ + +After observing $n$ draws with $k$ successes, Bayes' rule yields the +posterior + +$$ +p \,|\, x^n \;\sim\; \mathrm{Beta}(\alpha_i + k,\; \beta_i + n - k), +$$ + +and the one-step-ahead predictive probability is + +$$ +\hat{p}_i^n = \mathbb{E}[p\,|\,x^n] = \frac{\alpha_i + k}{\alpha_i + \beta_i + n}. +$$ + +By the strong law of large numbers, $k/n \to p^*$ almost surely, so both +$\hat{p}_1^n$ and $\hat{p}_2^n$ converge to $p^*$ regardless of the +agents' initial priors $(\alpha_i, \beta_i)$. + +### The marginal likelihood and likelihood ratio + +The marginal probability assigned by agent $i$ to the observed sequence +$x^n$ (with $k$ successes, in any order) is + +$$ +P_i(x^n) += \frac{B(\alpha_i + k,\; \beta_i + n - k)}{B(\alpha_i,\, \beta_i)}, +$$ + +where $B(a,b) = \Gamma(a)\Gamma(b)/\Gamma(a+b)$ is the beta function. + +The likelihood ratio at time $n$ is therefore + +$$ +Z_n = \frac{P_{1,n}(x^n)}{P_{2,n}(x^n)} += \frac{B(\alpha_2,\, \beta_2)}{B(\alpha_1,\, \beta_1)} +\cdot +\frac{B(\alpha_1 + k,\, \beta_1 + n - k)}{B(\alpha_2 + k,\, \beta_2 + n - k)}. +$$ + +This is a martingale under $P_2$ (agent 2's probability) and converges +almost surely to a finite positive limit $Z_\infty$, reflecting the fact +that $P_1 \sim P_2$ for any Beta priors with positive parameters. + +### The exact Blackwell–Dubins distance + +For the Beta–Bernoulli model, there is a clean formula for $d_n$. +By de Finetti's theorem, each agent's conditional distribution of the +*future infinite sequence* given the past is a mixture of IID Bernoulli$(p)$ +processes, where $p$ is drawn from the posterior Beta distribution. 

Since the Bernoulli$(p)^{\infty}$ measures for different $p$ are mutually
singular (the empirical frequency identifies $p$ exactly), the TV distance
between the two conditional distributions over the future equals the TV
distance between the two posterior distributions over the parameter $p$:

$$
d_n
= \bigl\|\mathrm{Beta}(\alpha_1 + k_n,\,\beta_1 + n - k_n)
- \mathrm{Beta}(\alpha_2 + k_n,\,\beta_2 + n - k_n)\bigr\|_{\mathrm{TV}}.
$$

As $k_n/n \to p^*$ and $n \to \infty$, both posterior Betas concentrate
around $p^*$ with the same variance $O(1/n)$, so $d_n \to 0$.

---

## Python: Merging in Action

We set up helper functions and then run the main simulation.

```{code-cell} ipython3
# -------------------------------------------------------------------------
# Helper functions for the Beta-Bernoulli example
# NOTE(review): these helpers use `np`, `beta_dist` and `betaln`, which are
# presumably bound in the lecture's setup cell (numpy, scipy.stats.beta,
# scipy.special.betaln) — confirm against the preamble.
# -------------------------------------------------------------------------

def beta_bernoulli_update(data, a0, b0):
    """
    Sequential Beta-Bernoulli Bayesian updating.

    Conjugacy: after observing k successes in n draws, the Beta(a0, b0)
    prior becomes Beta(a0 + k, b0 + n - k).

    Parameters
    ----------
    data : 1-D array of 0s and 1s
    a0, b0 : float
        Prior Beta parameters.

    Returns
    -------
    a_post, b_post : 1-D arrays of length len(data) + 1
        Posterior parameters after 0, 1, ..., len(data) observations.
        Index 0 is the prior.
    """
    n = len(data)
    cum_k = np.concatenate([[0], np.cumsum(data)])  # cumulative successes
    ns = np.arange(n + 1)  # 0, 1, ..., n
    a_post = a0 + cum_k
    b_post = b0 + (ns - cum_k)
    return a_post, b_post


def predictive_prob(a_post, b_post):
    """
    One-step-ahead predictive probability P(X=1 | data), i.e. the
    posterior mean a/(a+b) of p under Beta(a_post, b_post).
    """
    return a_post / (a_post + b_post)


def tv_distance_beta(a1, b1, a2, b2, n_grid=2000):
    """
    TV distance between Beta(a1,b1) and Beta(a2,b2) via grid quadrature.
    Uses a fine grid on (0,1); fast because it is fully vectorised.

    Implements the density form 0.5 * integral |f1 - f2| of the TV
    definition as a Riemann sum on `n_grid` points.  The grid is clipped
    to [1e-8, 1 - 1e-8] because a Beta pdf with a shape parameter below 1
    diverges at the corresponding endpoint.

    Returns a float in [0, 1] (up to quadrature error).
    """
    x = np.linspace(1e-8, 1 - 1e-8, n_grid)
    dx = x[1] - x[0]
    p1 = beta_dist.pdf(x, a1, b1)
    p2 = beta_dist.pdf(x, a2, b2)
    return 0.5 * np.sum(np.abs(p1 - p2)) * dx


def log_likelihood_ratio(data, a1, b1, a2, b2):
    """
    Log likelihood ratio log Z_n = log P1_n(data) - log P2_n(data)
    for every prefix of `data`.

    Each agent's log marginal likelihood is
    betaln(posterior params) - betaln(prior params), matching the
    closed-form marginal probability B(a_i + k, b_i + n - k) / B(a_i, b_i)
    derived in the text.

    Returns an array of length len(data) + 1, starting at 0 (before data,
    where posterior = prior so the two betaln terms cancel).
    """
    a1p, b1p = beta_bernoulli_update(data, a1, b1)
    a2p, b2p = beta_bernoulli_update(data, a2, b2)
    log_P1 = betaln(a1p, b1p) - betaln(a1, b1)
    log_P2 = betaln(a2p, b2p) - betaln(a2, b2)
    return log_P1 - log_P2


def run_simulation(p_true, a1, b1, a2, b2, n_steps, seed=0):
    """
    Simulate one realisation of the merging experiment: n_steps IID
    Bernoulli(p_true) draws, tracking both agents' posteriors.

    Returns a dict with keys:
        data     -- the simulated 0/1 draws (length n_steps)
        pred1, pred2 -- predictive means (length n_steps + 1, index 0 = prior)
        tv_1step -- |pred1 - pred2|, one-step predictive TV distance
        tv_beta  -- exact Blackwell-Dubins d_n (TV between posterior Betas)
        log_Z    -- log likelihood ratio along the sample path
    """
    rng = np.random.default_rng(seed)
    data = rng.binomial(1, p_true, n_steps)

    a1p, b1p = beta_bernoulli_update(data, a1, b1)
    a2p, b2p = beta_bernoulli_update(data, a2, b2)

    pred1 = predictive_prob(a1p, b1p)
    pred2 = predictive_prob(a2p, b2p)
    tv_1step = np.abs(pred1 - pred2)

    # TV between posterior Betas — the exact Blackwell-Dubins d_n
    tv_beta = np.array([
        tv_distance_beta(a1p[i], b1p[i], a2p[i], b2p[i])
        for i in range(n_steps + 1)
    ])

    log_Z = log_likelihood_ratio(data, a1, b1, a2, b2)

    return dict(data=data, pred1=pred1, pred2=pred2,
                tv_1step=tv_1step, tv_beta=tv_beta, log_Z=log_Z)
```

### The main merging figure

We choose two agents with very different beliefs about the bias of a coin
whose true probability of heads is $p^* = 0.65$.

- **Agent 1** (skeptic): prior $\mathrm{Beta}(1, 8)$, so
  $\hat{p}_1^0 = 1/9 \approx 0.11$.
- **Agent 2** (optimist): prior $\mathrm{Beta}(8, 1)$, so
  $\hat{p}_2^0 = 8/9 \approx 0.89$.

Both priors are supported on all of $(0,1)$, so $P_1 \sim P_2$.
Blackwell–Dubins guarantees merging; the question is only how fast.

```{code-cell} ipython3
# -------------------------------------------------------------------------
# Simulation parameters
# -------------------------------------------------------------------------
p_true = 0.65
a1, b1 = 1.0, 8.0  # sceptic: prior mean = 1/9 ≈ 0.11
a2, b2 = 8.0, 1.0  # optimist: prior mean = 8/9 ≈ 0.89
n_steps = 600

sim = run_simulation(p_true, a1, b1, a2, b2, n_steps, seed=7)
steps = np.arange(n_steps + 1)

# -------------------------------------------------------------------------
# Figure 1: merging of predictive distributions and TV distance
# -------------------------------------------------------------------------
fig, axes = plt.subplots(2, 2, figsize=(11, 7))
fig.suptitle("Merging of Opinions: Beta–Bernoulli Model", fontsize=13)

# --- Panel (a): posterior predictive probabilities ---
ax = axes[0, 0]
ax.plot(steps, sim['pred1'], color='steelblue', lw=1.5,
        label=r'Agent 1 $\hat p_1^n$ (prior: sceptic)')
ax.plot(steps, sim['pred2'], color='firebrick', lw=1.5,
        label=r'Agent 2 $\hat p_2^n$ (prior: optimist)')
ax.axhline(p_true, color='black', lw=1.0, ls='--', label=f'Truth $p^*={p_true}$')
ax.set_xlabel('Observations $n$')
ax.set_ylabel('Predictive probability')
ax.set_title('(a) Posterior predictive means')
ax.legend(fontsize=8)
ax.set_ylim(0, 1)

# --- Panel (b): TV distance (exact Blackwell-Dubins d_n) ---
ax = axes[0, 1]
# the + 1e-10 floor keeps log-scale plotting safe when d_n underflows to 0
ax.semilogy(steps, sim['tv_beta'] + 1e-10, color='purple', lw=1.5)
ax.set_xlabel('Observations $n$')
ax.set_ylabel(r'$d_n = \|P(\cdot|\mathscr{F}_n) - Q(\cdot|\mathscr{F}_n)\|_{\mathrm{TV}}$')
ax.set_title(r'(b) Total-variation distance $d_n \to 0$')
ax.set_ylim(bottom=1e-4)

# --- Panel (c): log likelihood ratio ---
ax = axes[1, 0]
ax.plot(steps, sim['log_Z'], color='darkorange', lw=1.5)
ax.axhline(0, color='black', lw=0.8, ls=':')
ax.set_xlabel('Observations $n$')
ax.set_ylabel(r'$\log Z_n$')
ax.set_title(r'(c) Log likelihood ratio $\log Z_n \to \log Z_\infty$')

# --- Panel (d): posterior Beta densities at selected epochs ---
ax = axes[1, 1]
xs = np.linspace(0.01, 0.99, 500)
epochs = [0, 20, 100, n_steps]
colors = plt.cm.viridis(np.linspace(0.2, 0.85, len(epochs)))

# This panel's legend is built manually from Line2D handles below, so the
# per-line plots carry no labels.  (A previously unused `cum_k` total and
# redundant in-loop `label=` arguments have been removed.)
for epoch, col in zip(epochs, colors):
    k_e = int(np.sum(sim['data'][:epoch]))  # successes in the first `epoch` draws
    pdf1 = beta_dist.pdf(xs, a1 + k_e, b1 + epoch - k_e)
    pdf2 = beta_dist.pdf(xs, a2 + k_e, b2 + epoch - k_e)
    ax.plot(xs, pdf1, color=col, lw=1.6, ls='-')
    ax.plot(xs, pdf2, color=col, lw=1.6, ls='--')

ax.axvline(p_true, color='black', lw=1.0, ls=':', label=f'$p^*={p_true}$')
ax.set_xlabel('$p$')
ax.set_ylabel('Posterior density')
ax.set_title('(d) Posterior Beta densities over time\n'
             '(solid = Agent 1, dashed = Agent 2)')
# Custom legend
from matplotlib.lines import Line2D
handles = [Line2D([0],[0], color=colors[0], lw=1.6, label='$n=0$ (prior)'),
           Line2D([0],[0], color=colors[-1], lw=1.6, label=f'$n={n_steps}$'),
           Line2D([0],[0], color='black', lw=1.0, ls=':', label=f'$p^*={p_true}$')]
ax.legend(handles=handles, fontsize=8)
ax.set_ylim(bottom=0)

plt.tight_layout()
plt.show()
```

The four panels tell a coherent story:

- **Panel (a)**: Starting from $\hat{p}_1^0 \approx 0.11$ and
  $\hat{p}_2^0 \approx 0.89$, both agents' predictive probabilities
  converge to $p^* = 0.65$.
- **Panel (b)**: The total-variation distance $d_n$ decays to zero on a
  logarithmic scale, consistent with the theorem.
- **Panel (c)**: The log likelihood ratio $\log Z_n$ converges to a finite
  value — confirming that the two measures are mutually absolutely continuous
  and that neither singular case applies.
- **Panel (d)**: The posterior Beta densities for the two agents start far
  apart (one near 0, one near 1) and progressively concentrate to the same
  distribution centred on the truth.
+ +--- + +## Almost-Sure Convergence Across Many Paths + +To see the "almost-sure" character of the theorem, we run many independent +replications. On *every* path the TV distance should converge to zero, +not just on average. + +```{code-cell} ipython3 +# ------------------------------------------------------------------------- +# Simulate N_paths independent realisations +# ------------------------------------------------------------------------- +N_paths = 80 +n_steps = 500 + +fig, axes = plt.subplots(1, 2, figsize=(11, 4)) +fig.suptitle("Almost-sure merging across sample paths", fontsize=12) + +ax_tv = axes[0] +ax_log = axes[1] + +tv_all = np.empty((N_paths, n_steps + 1)) +logZ_all = np.empty((N_paths, n_steps + 1)) +steps = np.arange(n_steps + 1) + +for i in range(N_paths): + s = run_simulation(p_true, a1, b1, a2, b2, n_steps, seed=i) + tv_all[i] = s['tv_beta'] + logZ_all[i] = s['log_Z'] + +# --- Panel (a): TV distance paths --- +for i in range(N_paths): + ax_tv.semilogy(steps, tv_all[i] + 1e-10, color='steelblue', + lw=0.5, alpha=0.3) +ax_tv.semilogy(steps, tv_all.mean(axis=0) + 1e-10, + color='black', lw=2, label='Cross-path mean') +ax_tv.set_xlabel('Observations $n$') +ax_tv.set_ylabel(r'$d_n$ (log scale)') +ax_tv.set_title(r'(a) TV distance $d_n \to 0$ on every path') +ax_tv.legend() + +# --- Panel (b): log Z_n paths --- +for i in range(N_paths): + ax_log.plot(steps, logZ_all[i], color='firebrick', + lw=0.5, alpha=0.3) +ax_log.plot(steps, logZ_all.mean(axis=0), + color='black', lw=2, label='Cross-path mean') +ax_log.axhline(0, color='gray', lw=0.8, ls=':') +ax_log.set_xlabel('Observations $n$') +ax_log.set_ylabel(r'$\log Z_n$') +ax_log.set_title(r'(b) Likelihood ratio $\log Z_n$ converges on every path') +ax_log.legend() + +plt.tight_layout() +plt.show() + +# Fraction of paths on which d_n < 0.01 at the final step +frac_small = np.mean(tv_all[:, -1] < 0.01) +print(f"Fraction of paths with d_n < 0.01 at n = {n_steps}: {frac_small:.2f}") +``` + +As 
predicted, $d_n \to 0$ on essentially every sample path: +the theorem gives an almost-sure guarantee, not merely a statement about +expected values. + +--- + +## The Supermartingale Property of $d_n$ + +The proof relies on $\{d_n\}$ being a non-negative supermartingale. +We can verify this numerically by checking that $d_n$ tends to decrease +over time and that its conditional expectation does not increase. + +```{code-cell} ipython3 +# ------------------------------------------------------------------------- +# Verify the supermartingale property: +# E_Q[d_{n+1} | F_n] <= d_n +# ------------------------------------------------------------------------- +# Proxy: average d_{n+1} - d_n across many paths should be <= 0. + +diffs = np.diff(tv_all, axis=1) # shape (N_paths, n_steps) +mean_diffs = diffs.mean(axis=0) # average increment at each step +cum_sum = np.cumsum(mean_diffs) # cumulative average change + +fig, axes = plt.subplots(1, 2, figsize=(10, 4)) + +ax = axes[0] +ax.plot(mean_diffs[:200], color='purple', lw=1.2) +ax.axhline(0, color='black', lw=0.8, ls='--') +ax.fill_between(range(200), mean_diffs[:200], 0, + where=(mean_diffs[:200] < 0), + alpha=0.25, color='purple', label='Decrements (negative)') +ax.fill_between(range(200), mean_diffs[:200], 0, + where=(mean_diffs[:200] > 0), + alpha=0.25, color='red', label='Increments (positive)') +ax.set_xlabel('Observations $n$') +ax.set_ylabel(r'$\mathbb{E}[d_{n+1} - d_n]$') +ax.set_title(r'(a) Average increments of $d_n$') +ax.legend(fontsize=8) + +ax = axes[1] +ax.plot(cum_sum[:200], color='darkorange', lw=1.5) +ax.axhline(0, color='black', lw=0.8, ls='--') +ax.set_xlabel('Observations $n$') +ax.set_ylabel(r'Cumulative average change in $d_n$') +ax.set_title(r'(b) Cumulative drift: net decrease confirms supermartingale') + +plt.tight_layout() +plt.show() + +frac_decrease = np.mean(mean_diffs < 0) +print(f"Fraction of steps with average decrement: {frac_decrease:.2%}") +``` + +The average increment is negative at 
most steps, and the cumulative drift
is monotonically downward. This is the numerical signature of the
supermartingale property $\mathbb{E}_Q[d_{n+1}\,|\,\mathscr{F}_n] \leq d_n$.

---

## Failure of Merging: Mutual Singularity

What happens when the hypothesis of the theorem fails — that is, when
$P \not\ll Q$? The answer is both instructive and stark.

### Point-mass priors

Suppose both agents hold degenerate (point-mass) priors:

- **Agent P**: certain that $p = p_P = 0.30$.
- **Agent Q**: certain that $p = p_Q = 0.75$.

Since $P$ charges only sequences whose empirical frequency converges to
$0.30$, and $Q$ charges only sequences whose empirical frequency converges
to $0.75$, the two measures are mutually **singular**: $P \perp Q$.

The conditional distributions do not update — both agents are certain of
their model — so the *one-step predictive* distributions stay a fixed
distance apart:

$$
\bigl\|P(x_{n+1} \in \cdot \,|\,\mathscr{F}_n)
- Q(x_{n+1} \in \cdot \,|\,\mathscr{F}_n)\bigr\|_{\mathrm{TV}}
= |p_P - p_Q| = 0.45
\quad \text{for all } n.
$$

Over the *whole future* the disagreement is even starker: because
$\mathrm{Bernoulli}(p_P)^{\infty} \perp \mathrm{Bernoulli}(p_Q)^{\infty}$,
the full conditional distributions satisfy $d_n = 1$ for every $n$,
consistent with the tightness remark following the theorem.

The empirical frequency converges to the truth under the true measure,
which means one agent's model is eventually falsified (and indeed, under
$Q$, the empirical frequency $\to 0.75$ reveals that agent $P$'s model
is wrong with probability 1).

```{code-cell} ipython3
# -------------------------------------------------------------------------
# Failure of merging: mutually singular point-mass priors
# -------------------------------------------------------------------------
fig, axes = plt.subplots(1, 2, figsize=(11, 4))
fig.suptitle("Failure of Merging: Mutually Singular Priors ($P \\perp Q$)",
             fontsize=12)

# True data drawn under Q's model (p_Q = 0.75)
p_P = 0.30  # agent P's fixed belief
p_Q = 0.75  # agent Q's fixed belief ← truth
n_steps = 500

rng = np.random.default_rng(1)
data = rng.binomial(1, p_Q, n_steps)

# Empirical frequency of successes
emp_freq = np.cumsum(data) / np.arange(1, n_steps + 1)

# 1-step predictive TV distance (constant: |p_P - p_Q|)
# This is the plotted summary of disagreement; under mutual singularity
# the TV distance over the whole future is 1 (see the tightness remark
# after the theorem), so the one-step distance understates it.
tv_singular = np.full(n_steps, np.abs(p_P - p_Q))

# For comparison: run a Beta-Bernoulli merging experiment with the same truth
sim_abs_cont = run_simulation(p_Q, 1.0, 8.0, 8.0, 1.0, n_steps, seed=1)

# --- Panel (a): empirical frequency ---
ax = axes[0]
ax.plot(np.arange(1, n_steps + 1), emp_freq,
        color='steelblue', lw=1.5, label='Empirical frequency $k_n/n$')
ax.axhline(p_Q, color='firebrick', lw=1.2, ls='--',
           label=f'Truth $p_Q = {p_Q}$')
ax.axhline(p_P, color='gray', lw=1.2, ls=':',
           label=f"Agent P's belief $p_P = {p_P}$")
ax.set_xlabel('Observations $n$')
ax.set_ylabel('Probability')
ax.set_title('(a) Empirical frequency converges to truth')
ax.legend(fontsize=8)
ax.set_ylim(0, 1)

# --- Panel (b): TV distance comparison ---
ax = axes[1]
ax.plot(np.arange(1, n_steps + 1), tv_singular,
        color='firebrick', lw=2.0,
        label=r'Singular priors: $d_n = |p_P - p_Q| = 0.45$')
# NOTE: semilogy switches this axes to a log y-scale, so the constant red
# line plotted above is also displayed on the log scale.
ax.semilogy(np.arange(n_steps + 1),
            sim_abs_cont['tv_beta'] + 1e-10,
            color='steelblue', lw=2.0,
            label=r'$\mathrm{Beta}(1,8)$ vs $\mathrm{Beta}(8,1)$: $d_n \to 0$')
ax.set_xlabel('Observations $n$')
ax.set_ylabel(r'$d_n$')
ax.set_title('(b) TV distance: merging vs non-merging')
ax.legend(fontsize=8)

plt.tight_layout()
plt.show()
```

The contrast is vivid. With mutually absolutely continuous priors (blue),
the total-variation distance decays to zero as Blackwell–Dubins guarantees.
With mutually singular point-mass priors (red), the distance stays
permanently at $|p_P - p_Q| = 0.45$. More data never resolves the
disagreement — the two agents are committed to models that are
separated by events they each regard as having probability zero under
the other's measure.

---

## Kakutani's Theorem: When Does Merging Hold?

A natural question is: for which product measures does the Blackwell–Dubins
hypothesis $P \ll Q$ hold? For infinite product measures, the answer is
given by a classical result of {cite:t}`kakutani1948`.

### Hellinger affinities

```{admonition} Definition
:class: tip
**Hellinger affinity.**
For probability measures $P_n$ and $Q_n$ on $(S, \mathscr{S})$ with common
dominating measure $\lambda$, the *Hellinger affinity* is

$$
\rho_n = \int_S \sqrt{\frac{dP_n}{d\lambda} \cdot \frac{dQ_n}{d\lambda}}\,d\lambda
\;\in\; (0, 1].
$$

$\rho_n = 1$ if and only if $P_n = Q_n$.
```

For two specific one-dimensional families:

- **Gaussian**: $P_n = \mathcal{N}(\mu_n, 1)$ vs $Q_n = \mathcal{N}(0,1)$:

$$
\rho_n^{\text{Gauss}} = \exp\!\left(-\frac{\mu_n^2}{8}\right).
$$

- **Bernoulli**: $P_n = \mathrm{Bernoulli}(p)$ vs $Q_n = \mathrm{Bernoulli}(q)$:

$$
\rho_n^{\text{Bern}} = \sqrt{pq} + \sqrt{(1-p)(1-q)}.
$$

### Kakutani's dichotomy

```{admonition} Theorem (Kakutani, 1948)
:class: important
Let $P = \bigotimes_{n=1}^\infty P_n$ and $Q = \bigotimes_{n=1}^\infty Q_n$
be infinite product measures. Then either $P \sim Q$ or $P \perp Q$; there
is no intermediate case. Specifically,

$$
P \sim Q
\quad \iff \quad
\prod_{n=1}^\infty \rho_n > 0
\quad \iff \quad
\sum_{n=1}^\infty (1 - \rho_n) < \infty.
$$

If $\prod_{n=1}^\infty \rho_n = 0$, then $P \perp Q$.

*Proof sketch.*
The likelihood ratio $Z_N = \prod_{n=1}^N (dP_n/dQ_n)$ is a $Q$-martingale,
and independence across factors gives
$\mathbb{E}_Q[\sqrt{Z_N}] = \prod_{n=1}^N \rho_n$.
If $\prod \rho_n > 0$: the sequence $\{\sqrt{Z_N}\}$ is Cauchy in $L^2(Q)$,
so $\{Z_N\}$ converges in $L^1(Q)$ and is uniformly integrable,
giving $P \ll Q$.
If $\prod \rho_n = 0$: $\mathbb{E}_Q[\sqrt{Z_N}] \to 0$, so $Z_\infty = 0$
$Q$-a.s. and $P \perp Q$. $\square$
```

### Implication for merging

For i.i.d.-type sequences, Kakutani's theorem gives the following picture:

| Scenario | $\sum_n (1-\rho_n)$ | Conclusion | Merging? |
|---|---|---|---|
| $P_n = Q_n$ for all $n$ | $0$ | $P = Q$ | Trivially yes |
| $P_n \ne Q_n$ with $\sum_n (1-\rho_n) < \infty$ | Finite | $P \sim Q$ | Yes — Blackwell–Dubins applies |
| $P_n = P \ne Q = Q_n$ fixed, $n \ge 1$ | $\infty$ | $P \perp Q$ | No |

The i.i.d. case with different fixed marginals is the most common "no merging"
scenario: if two agents ascribe permanently different distributions to each
observation, they will eventually be in completely disjoint worlds.

### Python: the Gaussian product measure example

We illustrate Kakutani's dichotomy with Gaussian product measures,
$Q = \mathcal{N}(0,1)^{\otimes\mathbb{N}}$ as the reference measure and
$P = \bigotimes_n \mathcal{N}(\mu_n,1)$ as the alternative.

Three choices of $\mu_n$:

1. $\mu_n = c > 0$ **constant** ($\sum (1-\rho_n) = \infty$) $\Rightarrow P \perp Q$.
2. $\mu_n = c/\!\sqrt{n}$ ($\sum (1-\rho_n) \approx \sum c^2/(8n) = \infty$) $\Rightarrow P \perp Q$.
3. $\mu_n = c/n$ ($\sum (1-\rho_n) \approx \sum c^2/(8n^2) < \infty$) $\Rightarrow P \sim Q$.

```{code-cell} ipython3
# -------------------------------------------------------------------------
# Kakutani's theorem: Gaussian product measures
# -------------------------------------------------------------------------
# NOTE(review): `scipy_norm` appears unused in this cell — candidate for removal.
from scipy.stats import norm as scipy_norm

def hellinger_affinity_gauss(mu_n):
    """
    Hellinger affinity between N(mu_n, 1) and N(0, 1).

    Implements the closed form rho = exp(-mu^2 / 8) stated in the text;
    vectorised over an array of drifts.
    """
    return np.exp(-mu_n**2 / 8.0)


N_max = 2000
ns = np.arange(1, N_max + 1)
c = 2.0  # scale parameter common to all sequences

sequences = {
    r'Constant: $\mu_n = c$':
        np.full(N_max, c),
    r'$\mu_n = c / \sqrt{n}$':
        c / np.sqrt(ns),
    r'$\mu_n = c / n$ ($P \sim Q$)':
        c / ns,
}

fig, axes = plt.subplots(1, 3, figsize=(13, 4))
fig.suptitle(r"Kakutani's Dichotomy: Gaussian Product Measures ($c = 2$)",
             fontsize=12)

colors_k = ['firebrick', 'darkorange', 'steelblue']
# NOTE(review): `labels_k` is never used below — panel labels come directly
# from sequences.items(); candidate for removal.
labels_k = list(sequences.keys())

# --- Panel (a): log Hellinger product log prod rho_n = sum log rho_n ---
ax = axes[0]
for (label, mu_seq), col in zip(sequences.items(), colors_k):
    rho = hellinger_affinity_gauss(mu_seq)
    log_prod = np.cumsum(np.log(rho))
    ax.plot(ns, log_prod, color=col, lw=1.8, label=label)
ax.axhline(0, color='black', lw=0.8, ls=':')
ax.set_xlabel('$N$')
ax.set_ylabel(r'$\log \prod_{n=1}^{N} \rho_n$')
ax.set_title(r'(a) Log Hellinger product')
ax.legend(fontsize=7.5)

# --- Panel (b): sum of (1 - rho_n) ---
ax = axes[1]
for (label, mu_seq), col in zip(sequences.items(), colors_k):
    rho = hellinger_affinity_gauss(mu_seq)
    cum_sum = np.cumsum(1 - rho)
    ax.plot(ns, cum_sum, color=col, lw=1.8, label=label)
ax.set_xlabel('$N$')
ax.set_ylabel(r'$\sum_{n=1}^{N}(1 - \rho_n)$')
ax.set_title(r'(b) Cumulative $\sum (1-\rho_n)$: finite $\Leftrightarrow$ $P \sim Q$')
ax.legend(fontsize=7.5)

# --- Panel (c): simulated log Z_N trajectories ---
rng = np.random.default_rng(0)
N_plot = 400

ax = axes[2]
for (label, mu_seq), col in zip(sequences.items(), colors_k):
    # generate data from Q = N(0,1)
    # (each sequence draws fresh noise from the shared rng, so the three
    #  trajectories are not driven by common shocks)
    x = rng.standard_normal(N_plot)
    # log Z_N = sum_{n=1}^N [mu_n * x_n - mu_n^2 / 2]
    log_Z_increments = mu_seq[:N_plot] * x - mu_seq[:N_plot]**2 / 2.0
    log_Z_path = np.concatenate([[0], np.cumsum(log_Z_increments)])
    ax.plot(np.arange(N_plot + 1), log_Z_path, color=col, lw=1.5, label=label)

ax.axhline(0, color='black', lw=0.8, ls=':')
ax.set_xlabel('$N$')
ax.set_ylabel(r'$\log Z_N$ (one trajectory under $Q$)')
ax.set_title('(c) Likelihood ratio: diverges or converges')
ax.legend(fontsize=7.5)

plt.tight_layout()
plt.show()
```

The three panels confirm Kakutani's theorem:

- **Constant drift** (red): $\log \prod \rho_n \to -\infty$ and
  $\sum (1-\rho_n) \to \infty$; the likelihood ratio drifts to $-\infty$
  under $Q$, so $Z_N \to 0$ $Q$-a.s. and $P \perp Q$.
- **$\mu_n = c/\sqrt{n}$** (orange): the same qualitative picture.
  Despite the drift vanishing, it does so too slowly.
- **$\mu_n = c/n$** (blue): $\sum (1-\rho_n) < \infty$, the log Hellinger
  product stabilises to a finite limit, and the likelihood ratio converges
  to a finite positive value — confirming $P \sim Q$.

Only in the third case does Blackwell–Dubins apply and merging occur.

---

## Extension to Continuous Time

The Blackwell–Dubins theorem extends naturally to continuous time.

### Girsanov's theorem and the likelihood-ratio process

On the canonical Wiener space with $Q$ the Wiener measure (standard
Brownian motion $W$), suppose agent $P$ believes the process has an
additional drift $\theta = \{\theta_s\}_{s \geq 0}$:

$$
W_t = \widetilde{W}_t + \int_0^t \theta_s\, ds,
$$

where $\widetilde{W}$ is a $P$-Brownian motion. The
**Girsanov–Cameron–Martin theorem** {cite:p}`girsanov1960` gives the
likelihood-ratio process as the stochastic exponential

$$
Z_t
= \exp\!\left(\int_0^t \theta_s\, dW_s - \frac{1}{2}\int_0^t \theta_s^2\, ds\right).
+$$ + +$Z_t$ is always a non-negative $Q$-local martingale: it is a true martingale +if and only if $\mathbb{E}_Q[Z_t] = 1$ for all $t$. + +**Novikov's condition** {cite:p}`novikov1972` — $\mathbb{E}_Q\!\left[\exp\!\left(\tfrac{1}{2}\int_0^T \theta_s^2\,ds\right)\right] < \infty$ for all $T$ — is sufficient. + +### The dichotomy at infinity + +A key subtlety on $[0,+\infty)$: local absolute continuity ($P_t \ll Q_t$ +for every finite $t$) does *not* imply global absolute continuity +($P \ll Q$ on $\mathscr{F}_\infty$). + +```{admonition} Proposition (Dichotomy at infinity) +:class: note +Suppose $Z_t$ is a true $Q$-martingale for every finite horizon. Then +$Z_t \to Z_\infty$ $Q$-a.s., and exactly one of the following holds: + +1. $\{Z_t\}$ is uniformly integrable over $[0,\infty)$: + $P \ll Q$ on $\mathscr{F}_\infty$, with $dP/dQ = Z_\infty > 0$ $P$-a.s. + +2. $\{Z_t\}$ is *not* uniformly integrable: + $Z_\infty = 0$ $Q$-a.s. and $P \perp Q$ on $\mathscr{F}_\infty$. +``` + +A sufficient condition for case 1 is the **energy condition** + +$$ +\int_0^\infty \theta_s^2\,ds < \infty \quad Q\text{-a.s.} +$$ + +Intuitively, this says the total amount of information that separates the +two measures over the infinite horizon is finite. When $\theta$ is a +non-zero constant — meaning the two agents predict permanently different +drifts — the energy condition fails, $P \perp Q$, and merging cannot occur. + +With $P \ll Q$ on $\mathscr{F}_\infty$ established, the proof of the +continuous-time Blackwell–Dubins theorem is identical to the discrete-time +proof: $\{d_t, \mathscr{F}_t\}$ is a non-negative $Q$-supermartingale in +$[0,1]$, so $d_t \to d_\infty$ $Q$-a.s.; the $L^1$ bound +$\mathbb{E}_Q[d_t] \leq \tfrac{1}{2}\mathbb{E}_Q[|Z_t - Z_\infty|] \to 0$ +forces $d_\infty = 0$. + +--- + +## Applications + +### Bayesian learning + +The most direct application is to Bayesian inference. Suppose data +$(x_1, x_2, \ldots)$ are drawn from the true measure $Q^*$. 
An agent
holds a prior $\pi$ over a family $\{Q_\theta : \theta \in \Theta\}$,
inducing a marginal $P = \int Q_\theta\,\pi(d\theta)$. If $Q^* \ll P$ —
the "grain of truth" condition, which holds heuristically when the prior
puts positive mass on every neighbourhood of the true model — then
applying the theorem with the roles of the two measures taken by
$(Q^*, P)$ gives

$$
\bigl\|P(\,\cdot\,|\,x_1,\ldots,x_n) - Q^*(\,\cdot\,|\,x_1,\ldots,x_n)\bigr\|_{\mathrm{TV}}
\to 0 \quad Q^*\text{-a.s.}
$$

This is a strong form of **Bayesian consistency**: the agent's predictions
become indistinguishable from the truth, regardless of the specific prior,
as long as the prior is in the right absolute-continuity class.

{cite:t}`diaconis1986` establish that absolute continuity of the truth
with respect to the prior predictive distribution is not just sufficient
but essentially *necessary* for this kind of consistency. When
$P \perp Q^*$, there exist events of probability
one under $Q^*$ that have probability zero under $P$, so the agent maintains
fundamentally wrong beliefs forever.

### Rational expectations and heterogeneous priors

In macroeconomics, the **common prior assumption** embedded in rational
expectations models requires all agents to agree on the probability model
for the economy. Blackwell–Dubins provides a dynamic justification: if
two agents start with heterogeneous but mutually absolutely continuous
priors and observe a common history, their conditional forecasts will
eventually agree on every event, even if they never explicitly coordinate
their beliefs.

{cite:t}`aumann1976`'s **agreement theorem** strengthens this: agents with
a common prior cannot "agree to disagree" on posterior probabilities.
Blackwell–Dubins complements Aumann by showing that even without a common
prior, merging occurs eventually if the initial priors are equivalent.

### Ergodic Markov chains

For a Markov chain with transition kernel $\Pi$ and two initial
distributions $\mu$ and $\nu$, the $n$-step distributions are $\mu\Pi^n$
and $\nu\Pi^n$.
If $\Pi$ is ergodic with unique stationary distribution +$\pi$, both converge to $\pi$, so + +$$ +\|\mu\Pi^n - \nu\Pi^n\|_{\mathrm{TV}} +\leq \|\mu\Pi^n - \pi\|_{\mathrm{TV}} + \|\nu\Pi^n - \pi\|_{\mathrm{TV}} +\to 0. +$$ + +This is a special form of merging that does *not* require absolute +continuity, because ergodicity already forces both distributions to the same +limit. Blackwell–Dubins is the appropriate generalisation for +**non-ergodic** or **non-Markovian** processes, where no single invariant +measure exists and the operative condition is absolute continuity of the +initial priors. + +--- + +## The Rate of Merging + +Blackwell–Dubins gives only almost-sure convergence; it says nothing about +*how fast* $d_n \to 0$. The bound + +$$ +\mathbb{E}_Q[d_n] \leq \tfrac{1}{2}\,\mathbb{E}_Q[|Z_n - Z_\infty|] +$$ + +shows that the rate of merging is controlled by the $L^1$ convergence +rate of the likelihood ratio martingale. + +For parametric Bayesian models, the posterior contracts at the +$n^{-1/2}$ rate (Bernstein–von Mises theorem), which implies +$d_n = O(n^{-1/2})$ in expectation. The following figure illustrates +this for our Beta–Bernoulli model. 
+ +```{code-cell} ipython3 +# ------------------------------------------------------------------------- +# Rate of merging: compare d_n to n^{-1/2} +# ------------------------------------------------------------------------- +N_paths_rate = 200 +n_steps_rate = 800 + +tv_rate = np.empty((N_paths_rate, n_steps_rate + 1)) +for i in range(N_paths_rate): + s = run_simulation(p_true, a1, b1, a2, b2, n_steps_rate, seed=100 + i) + tv_rate[i] = s['tv_beta'] + +ns_rate = np.arange(1, n_steps_rate + 1) +mean_tv = tv_rate[:, 1:].mean(axis=0) # mean d_n, n = 1, ..., n_steps_rate + +# Fit a reference line d_n ~ C / sqrt(n) +log_ns = np.log(ns_rate[10:]) +log_tv = np.log(mean_tv[10:] + 1e-12) +coeffs = np.polyfit(log_ns, log_tv, 1) +slope = coeffs[0] + +# Reference curve C/sqrt(n) +C_ref = np.exp(coeffs[1]) +ref_curve = C_ref / np.sqrt(ns_rate) + +fig, ax = plt.subplots(figsize=(8, 4)) +ax.loglog(ns_rate, mean_tv, color='steelblue', lw=2, + label=r'$\mathbb{E}_Q[d_n]$ (Monte Carlo)') +ax.loglog(ns_rate, ref_curve, color='firebrick', lw=1.5, ls='--', + label=rf'Reference $C/\sqrt{{n}}$ (fitted slope $\approx {slope:.2f}$)') +ax.set_xlabel('$n$') +ax.set_ylabel(r'$\mathbb{E}_Q[d_n]$') +ax.set_title(r'Rate of merging: $\mathbb{E}_Q[d_n] = O(n^{-1/2})$') +ax.legend() +plt.tight_layout() +plt.show() + +print(f"Fitted log-log slope: {slope:.3f} (predicted: -0.50)") +``` + +The log–log slope of approximately $-0.5$ confirms the $O(n^{-1/2})$ rate +for this parametric model on any single sample path. + +--- + +## Summary + +The main logical chain underlying the Blackwell–Dubins theorem is: + +$$ +P \ll Q +\;\Longrightarrow\; +Z_n = \mathbb{E}_Q[Z_\infty\,|\,\mathscr{F}_n] +\text{ uniformly integrable} +\;\Longrightarrow\; +Z_n \xrightarrow{L^1(Q)} Z_\infty +\;\Longrightarrow\; +d_n \xrightarrow{Q\text{-a.s.}} 0. +$$ + +Key takeaways: + +1. **Mutual absolute continuity** is the operative condition. 
+ It is a statement about which events are deemed *possible*, not about + how likely they are. Two agents can disagree wildly on probabilities + yet still be guaranteed to eventually agree — provided neither agent + rules out events the other considers possible. + +2. **The likelihood-ratio martingale** is the central object. + Its $L^1$ convergence (guaranteed by uniform integrability under $P \ll Q$) + drives the almost-sure convergence of the total-variation distance $d_n$. + +3. **The supermartingale structure of $d_n$** provides the almost-sure + convergence: more data can only reduce (in expectation) the difficulty + of telling two hypotheses apart. + +4. **Kakutani's theorem** tells us when $P \ll Q$ holds for product + measures: precisely when the Hellinger affinities satisfy + $\sum_n (1 - \rho_n) < \infty$. + +5. **There is a sharp dichotomy**: either $P \sim Q$ (merging) or + $P \perp Q$ (permanent disagreement). There is no middle ground. + + From ce9e17250e4eec9110b6f37424c07163a896910d Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Wed, 1 Apr 2026 21:07:02 +1100 Subject: [PATCH 04/20] updates --- lectures/blackwell_kihlstrom.md | 1 - lectures/merging_of_opinions.md | 13 ------------- 2 files changed, 14 deletions(-) diff --git a/lectures/blackwell_kihlstrom.md b/lectures/blackwell_kihlstrom.md index fca0943d5..017a51d4c 100644 --- a/lectures/blackwell_kihlstrom.md +++ b/lectures/blackwell_kihlstrom.md @@ -1177,7 +1177,6 @@ In the two-state case this reduces to the familiar mean-preserving-spread compar DeGroot's contribution is to extend the comparison from particular utility functions to the full class of concave uncertainty functions. 
---- ## Relation to Bayesian likelihood-ratio learning diff --git a/lectures/merging_of_opinions.md b/lectures/merging_of_opinions.md index 88cd5952a..99e5ad5f7 100644 --- a/lectures/merging_of_opinions.md +++ b/lectures/merging_of_opinions.md @@ -71,7 +71,6 @@ from scipy.special import betaln np.random.seed(42) ``` ---- ## Probability Measures on Sequence Spaces @@ -167,7 +166,6 @@ $$ almost surely as $n \to \infty$. ---- ## The Likelihood-Ratio Martingale @@ -244,7 +242,6 @@ so the $L^1$ convergence of the martingale directly controls the rate at which the total variation distance between the two agents' conditional forecasts goes to zero. ---- ## The Blackwell–Dubins Theorem @@ -315,7 +312,6 @@ not merely sufficient; the dichotomy between $P \ll Q$ and $P \perp Q$ is qualitatively sharp. ``` ---- ## The Beta–Bernoulli Model @@ -394,7 +390,6 @@ $$ As $k_n/n \to p^*$ and $n \to \infty$, both posterior Betas concentrate around $p^*$ with the same variance $O(1/n)$, so $d_n \to 0$. ---- ## Python: Merging in Action @@ -594,7 +589,6 @@ The four panels tell a coherent story: apart (one near 0, one near 1) and progressively concentrate to the same distribution centred on the truth. ---- ## Almost-Sure Convergence Across Many Paths @@ -659,7 +653,6 @@ As predicted, $d_n \to 0$ on essentially every sample path: the theorem gives an almost-sure guarantee, not merely a statement about expected values. ---- ## The Supermartingale Property of $d_n$ @@ -712,7 +705,6 @@ The average increment is negative at most steps, and the cumulative drift is monotonically downward. This is the numerical signature of the supermartingale property $\mathbb{E}_Q[d_{n+1}\,|\,\mathscr{F}_n] \leq d_n$. ---- ## Failure of Merging: Mutual Singularity @@ -809,7 +801,6 @@ disagreement — the two agents are committed to models that are separated by events they each regard as having probability zero under the other's measure. ---- ## Kakutani's Theorem: When Does Merging Hold? 
@@ -990,7 +981,6 @@ The three panels confirm Kakutani's theorem: Only in the third case does Blackwell–Dubins apply and merging occur. ---- ## Extension to Continuous Time @@ -1056,7 +1046,6 @@ $[0,1]$, so $d_t \to d_\infty$ $Q$-a.s.; the $L^1$ bound $\mathbb{E}_Q[d_t] \leq \tfrac{1}{2}\mathbb{E}_Q[|Z_t - Z_\infty|] \to 0$ forces $d_\infty = 0$. ---- ## Applications @@ -1119,7 +1108,6 @@ limit. Blackwell–Dubins is the appropriate generalisation for measure exists and the operative condition is absolute continuity of the initial priors. ---- ## The Rate of Merging @@ -1181,7 +1169,6 @@ print(f"Fitted log-log slope: {slope:.3f} (predicted: -0.50)") The log–log slope of approximately $-0.5$ confirms the $O(n^{-1/2})$ rate for this parametric model on any single sample path. ---- ## Summary From 88e240c195f3d3edbe99093abd9fb7813b2633ca Mon Sep 17 00:00:00 2001 From: thomassargent30 Date: Wed, 1 Apr 2026 12:08:13 -0400 Subject: [PATCH 05/20] Tom's April 1 edits of new and old lectures --- lectures/_static/quant-econ.bib | 368 +++++++++++ lectures/_toc.yml | 1 + lectures/merging_of_opinions.md | 205 +++++- lectures/organization_capital.md | 1007 ++++++++++++++++++++++++++++++ 4 files changed, 1553 insertions(+), 28 deletions(-) create mode 100644 lectures/organization_capital.md diff --git a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index e495748c8..6c66b3b05 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -2,6 +2,374 @@ QuantEcon Bibliography File used in conjuction with sphinxcontrib-bibtex package Note: Extended Information (like abstracts, doi, url's etc.) can be found in quant-econ-extendedinfo.bib file in _static/ ### +%% References in the economics literature citing the +%% Blackwell--Dubins theorem on merging of opinions. +%% +%% Blackwell, D. and Dubins, L. (1962), +%% ``Merging of Opinions with Increasing Information,'' +%% Annals of Mathematical Statistics 33(3): 882--886. 
+ +%% ------------------------------------------------------- +%% 1. The original paper +%% ------------------------------------------------------- + +@article{BlackwellDubins1962, + author = {Blackwell, David and Dubins, Lester}, + title = {Merging of Opinions with Increasing Information}, + journal = {Annals of Mathematical Statistics}, + year = {1962}, + volume = {33}, + number = {3}, + pages = {882--886}, + doi = {10.1214/aoms/1177704456} +} + +%% ------------------------------------------------------- +%% 2. Sandroni (1998/2000): the paper you recalled --- +%% casts doubt on the economic relevance of absolute +%% continuity and of the Blackwell--Dubins result. +%% ------------------------------------------------------- + +@article{Sandroni2000, + author = {Sandroni, Alvaro}, + title = {The Role of Absolute Continuity in + ``{Merging} of {Opinions}'' and + ``{Rational} {Learning}''}, + journal = {Games and Economic Behavior}, + year = {2000}, + volume = {29}, + number = {1--2}, + pages = {170--190}, + doi = {10.1006/game.1999.0752} +} + +%% ------------------------------------------------------- +%% 3. Jackson, Kalai, and Smorodinsky (1999): the +%% Matthew Jackson paper you recalled. Uses +%% Blackwell--Dubins to study de~Finetti-style +%% representation of stochastic processes under +%% Bayesian learning. +%% ------------------------------------------------------- + +@article{JacksonKalaiSmorodinsky1999, + author = {Jackson, Matthew O. and Kalai, Ehud and + Smorodinsky, Rann}, + title = {Bayesian Representation of Stochastic Processes + under Learning: {de Finetti} Revisited}, + journal = {Econometrica}, + year = {1999}, + volume = {67}, + number = {4}, + pages = {875--893}, + doi = {10.1111/1468-0262.00053} +} + +%% ------------------------------------------------------- +%% 4. Kalai and Lehrer (1993a): ``Rational Learning +%% Leads to Nash Equilibrium.'' The seminal +%% application of Blackwell--Dubins to game theory. 
+%% ------------------------------------------------------- + +@article{KalaiLehrer1993Nash, + author = {Kalai, Ehud and Lehrer, Ehud}, + title = {Rational Learning Leads to {Nash} Equilibrium}, + journal = {Econometrica}, + year = {1993}, + volume = {61}, + number = {5}, + pages = {1019--1045}, + doi = {10.2307/2951492} +} + +%% ------------------------------------------------------- +%% 5. Kalai and Lehrer (1993b): Subjective equilibrium. +%% ------------------------------------------------------- + +@article{KalaiLehrer1993Subjective, + author = {Kalai, Ehud and Lehrer, Ehud}, + title = {Subjective Equilibrium in Repeated Games}, + journal = {Econometrica}, + year = {1993}, + volume = {61}, + number = {5}, + pages = {1231--1240}, + doi = {10.2307/2951506} +} + +%% ------------------------------------------------------- +%% 6. Kalai and Lehrer (1994): Weak and strong merging. +%% Extends Blackwell--Dubins to weaker notions of +%% convergence; motivated by equilibrium convergence +%% in repeated games and dynamic economies. +%% ------------------------------------------------------- + +@article{KalaiLehrer1994Merging, + author = {Kalai, Ehud and Lehrer, Ehud}, + title = {Weak and Strong Merging of Opinions}, + journal = {Journal of Mathematical Economics}, + year = {1994}, + volume = {23}, + number = {1}, + pages = {73--86}, + doi = {10.1016/0304-4068(94)90037-X} +} + +%% ------------------------------------------------------- +%% 7. Kalai, Lehrer, and Smorodinsky (1999): Calibrated +%% forecasting and merging. +%% ------------------------------------------------------- + +@article{KalaiLehrerSmorodinsky1999, + author = {Kalai, Ehud and Lehrer, Ehud and Smorodinsky, Rann}, + title = {Calibrated Forecasting and Merging}, + journal = {Games and Economic Behavior}, + year = {1999}, + volume = {29}, + number = {1--2}, + pages = {151--169}, + doi = {10.1006/game.1999.0718} +} + +%% ------------------------------------------------------- +%% 8. 
Sandroni (1998): Almost absolute continuity and +%% convergence to Nash equilibrium. +%% ------------------------------------------------------- + +@article{Sandroni1998Nash, + author = {Sandroni, Alvaro}, + title = {Necessary and Sufficient Conditions for + Convergence to {Nash} Equilibrium: + The Almost Absolute Continuity Hypothesis}, + journal = {Games and Economic Behavior}, + year = {1998}, + volume = {22}, + number = {1}, + pages = {121--147}, + doi = {10.1006/game.1997.0580} +} + +%% ------------------------------------------------------- +%% 9. Pomatto, Al-Najjar, and Sandroni (2014): Merging +%% and testing opinions. Extends Blackwell--Dubins +%% to finitely additive probabilities and studies +%% the connection between merging and the +%% manipulation of statistical tests. +%% ------------------------------------------------------- + +@article{PomattoAlNajjarSandroni2014, + author = {Pomatto, Luciano and Al-Najjar, Nabil I. and + Sandroni, Alvaro}, + title = {Merging and Testing Opinions}, + journal = {Annals of Statistics}, + year = {2014}, + volume = {42}, + number = {3}, + pages = {1003--1028}, + doi = {10.1214/14-AOS1212} +} + +%% ------------------------------------------------------- +%% 10. Lehrer and Smorodinsky (1996a): Compatible measures +%% and merging. +%% ------------------------------------------------------- + +@article{LehrerSmorodinsky1996Compatible, + author = {Lehrer, Ehud and Smorodinsky, Rann}, + title = {Compatible Measures and Merging}, + journal = {Mathematics of Operations Research}, + year = {1996}, + volume = {21}, + number = {3}, + pages = {697--706}, + doi = {10.1287/moor.21.3.697} +} + +%% ------------------------------------------------------- +%% 11. Lehrer and Smorodinsky (1996b): Merging and +%% learning. In a volume on statistics, probability, +%% and game theory. 
+%% ------------------------------------------------------- + +@incollection{LehrerSmorodinsky1996Learning, + author = {Lehrer, Ehud and Smorodinsky, Rann}, + title = {Merging and Learning}, + booktitle = {Statistics, Probability and Game Theory: + Papers in Honor of {David Blackwell}}, + editor = {Ferguson, Thomas S. and Shapley, Lloyd S. and + MacQueen, James B.}, + series = {{IMS} Lecture Notes---Monograph Series}, + volume = {30}, + pages = {147--168}, + publisher = {Institute of Mathematical Statistics}, + address = {Hayward, CA}, + year = {1996} +} + +%% ------------------------------------------------------- +%% 12. Nyarko (1994): Bayesian learning leads to +%% correlated equilibria. Uses Blackwell--Dubins +%% to prove convergence in normal-form games. +%% ------------------------------------------------------- + +@article{Nyarko1994, + author = {Nyarko, Yaw}, + title = {Bayesian Learning Leads to Correlated Equilibria + in Normal Form Games}, + journal = {Economic Theory}, + year = {1994}, + volume = {4}, + number = {6}, + pages = {821--841}, + doi = {10.1007/BF01213814} +} + +%% ------------------------------------------------------- +%% 13. Jackson and Kalai (1999): Reputation vs. social +%% learning in recurring games; applies +%% Blackwell--Dubins and the rational-learning +%% literature. +%% ------------------------------------------------------- + +@article{JacksonKalai1999, + author = {Jackson, Matthew O. and Kalai, Ehud}, + title = {Reputation versus Social Learning}, + journal = {Journal of Economic Theory}, + year = {1999}, + volume = {88}, + number = {1}, + pages = {40--59}, + doi = {10.1006/jeth.1999.2542} +} + +%% ------------------------------------------------------- +%% 14. Acemoglu, Chernozhukov, and Yildiz (2016): +%% Learning and disagreement in an uncertain world. +%% Shows that Blackwell--Dubins-style agreement +%% can fail when agents are uncertain about signal +%% informativeness. 
+%% ------------------------------------------------------- + +@article{AcemogluChernozhukovYildiz2016, + author = {Acemoglu, Daron and Chernozhukov, Victor and + Yildiz, Muhamet}, + title = {Fragility of Asymptotic Agreement under + {Bayesian} Learning}, + journal = {Theoretical Economics}, + year = {2016}, + volume = {11}, + number = {1}, + pages = {187--225}, + doi = {10.3982/TE1156} +} + +%% ------------------------------------------------------- +%% 15. Diaconis and Freedman (1986): On the consistency +%% of Bayes estimates. A key probability paper +%% in the same intellectual tradition, often cited +%% alongside Blackwell--Dubins in the economics +%% learning literature. +%% ------------------------------------------------------- + +@article{DiaconisFreedman1986, + author = {Diaconis, Persi and Freedman, David}, + title = {On the Consistency of {Bayes} Estimates}, + journal = {Annals of Statistics}, + year = {1986}, + volume = {14}, + number = {1}, + pages = {1--26}, + doi = {10.1214/aos/1176349830} +} + +@article{lucas1967adjustment, + title={Adjustment costs and the theory of supply}, + author={Lucas Jr, Robert E}, + journal={Journal of political economy}, + volume={75}, + number={4, Part 1}, + pages={321--334}, + year={1967}, + publisher={The University of Chicago Press} +} + +@article{Prescott_Visscher_1980, + author = {Prescott, Edward C. 
and Visscher, Michael}, + title = {Organization Capital}, + journal = {Journal of Political Economy}, + volume = {88}, + number = {3}, + pages = {446--461}, + year = {1980}, + publisher = {University of Chicago Press} +} + +@article{Coase_1937, + author = {Coase, Ronald H.}, + title = {The Nature of the Firm}, + journal = {Economica}, + volume = {4}, + number = {16}, + pages = {386--405}, + year = {1937} +} + +@book{Williamson_1975, + author = {Williamson, Oliver E.}, + title = {Markets and Hierarchies: Analysis and Antitrust Implications}, + publisher = {Free Press}, + address = {New York}, + year = {1975} +} + +@article{Lucas_Prescott_1971, + author = {Lucas, Robert E., Jr. and Prescott, Edward C.}, + title = {Investment under Uncertainty}, + journal = {Econometrica}, + volume = {39}, + number = {5}, + pages = {659--681}, + year = {1971} +} + +@article{Stigler_1958, + author = {Stigler, George J.}, + title = {The Economies of Scale}, + journal = {Journal of Law and Economics}, + volume = {1}, + pages = {54--71}, + year = {1958} +} + +@book{Becker_1975, + author = {Becker, Gary S.}, + title = {Human Capital: A Theoretical and Empirical Analysis, with Special Reference to Education}, + edition = {2nd}, + publisher = {National Bureau of Economic Research}, + address = {New York}, + year = {1975} +} + +@article{Mansfield_1962, + author = {Mansfield, Edwin}, + title = {Entry, {G}ibrat's Law, Innovation, and the Growth of Firms}, + journal = {American Economic Review}, + volume = {52}, + number = {5}, + pages = {1023--1051}, + year = {1962} +} + +@article{Hymer_Pashigian_1962, + author = {Hymer, Stephen and Pashigian, Peter}, + title = {Firm Size and Rate of Growth}, + journal = {Journal of Political Economy}, + volume = {70}, + number = {6}, + pages = {556--569}, + year = {1962} +} + + @article{blackwell1962, author = {Blackwell, David and Dubins, Lester E.}, diff --git a/lectures/_toc.yml b/lectures/_toc.yml index 10ecb41c8..b15de98f3 100644 --- a/lectures/_toc.yml 
+++ b/lectures/_toc.yml @@ -63,6 +63,7 @@ parts: - file: wealth_dynamics - file: kalman - file: kalman_2 + - file: organization_capital - file: measurement_models - caption: Search numbered: true diff --git a/lectures/merging_of_opinions.md b/lectures/merging_of_opinions.md index 99e5ad5f7..10c5ef1f9 100644 --- a/lectures/merging_of_opinions.md +++ b/lectures/merging_of_opinions.md @@ -55,9 +55,11 @@ This result is connected to several other important ideas: operational criterion for when merging is guaranteed. We develop the theory in discrete time, where the argument is sharpest, and -sketch the continuous-time extension. Throughout we use the -**Beta–Bernoulli conjugate model** as the running numerical example: two -agents observe the same stream of coin flips but start with very different +sketch the continuous-time extension. + +Throughout we use the +**Beta–Bernoulli conjugate model** as a running numerical example: two +agents observe the same stream of coin flips but start with different priors over the coin's bias. Let us start with some imports. @@ -113,7 +115,9 @@ not conversely. ``` Mutual absolute continuity has a natural interpretation: $P \sim Q$ means the two -agents agree on which individual events are *possible*. They may disagree about +agents agree on which individual events are *possible*. + +They can disagree about how *likely* those events are, but neither agent considers an event impossible that the other considers possible. @@ -130,8 +134,9 @@ $$ = \frac{1}{2} \int_E \left|\frac{d\mu}{d\lambda} - \frac{d\nu}{d\lambda}\right| d\lambda, $$ -where $\lambda$ is any dominating measure. Equivalently, $\|\mu - \nu\|_{\mathrm{TV}} \in [0,1]$, -with 0 meaning $\mu = \nu$ and 1 meaning $\mu \perp \nu$ (mutual singularity). +where $\lambda$ is any dominating measure. + +Equivalently, $\|\mu - \nu\|_{\mathrm{TV}} \in [0,1]$, with 0 meaning $\mu = \nu$ and 1 meaning $\mu \perp \nu$ (mutual singularity). 
``` When $\mu \ll \nu$ with $f = d\mu/d\nu$, @@ -148,7 +153,9 @@ event differ by at most $\|\mu - \nu\|_{\mathrm{TV}}$. ### The merging question The Blackwell–Dubins theorem is about the conditional distributions of the -**future** given the **past**. At time $n$, after observing $(x_1,\ldots,x_n)$, +**future** given the **past**. + +At time $n$, after observing $(x_1,\ldots,x_n)$, each agent forms a conditional distribution over all future events: $$ @@ -157,7 +164,9 @@ Q(\,\cdot\,|\,\mathscr{F}_n)(\omega). $$ These are probability measures on the whole future path, not just the next -observation. The merging question asks whether +observation. + +The merging question asks whether $$ d_n \;:=\; \bigl\|P(\,\cdot\,|\,\mathscr{F}_n) - Q(\,\cdot\,|\,\mathscr{F}_n)\bigr\|_{\mathrm{TV}} @@ -278,7 +287,9 @@ $$ $$ So $\{d_n, \mathscr{F}_n\}$ is a non-negative $Q$-supermartingale taking -values in $[0,1]$. By Doob's supermartingale convergence theorem, $d_n \to d_\infty$ +values in $[0,1]$. + +By Doob's supermartingale convergence theorem, $d_n \to d_\infty$ $Q$-almost surely for some $[0,1]$-valued random variable $d_\infty$. **Step 3 — The almost-sure limit is zero.** @@ -289,8 +300,11 @@ $$ $$ Hence $d_n \to 0$ in $L^1(Q)$, so $d_n \to 0$ in probability under $Q$. + But $d_n \to d_\infty$ $Q$-a.s. and $d_n \to 0$ in probability together -force $d_\infty = 0$ $Q$-a.s. Since $P \ll Q$, every $Q$-null set is +force $d_\infty = 0$ $Q$-a.s. + +Since $P \ll Q$, every $Q$-null set is $P$-null, so $d_n \to 0$ $P$-a.s. as well. $\square$ ```{admonition} Remark (One-sided vs. mutual absolute continuity) @@ -321,8 +335,9 @@ the simulations. ### Model Suppose the data stream $(x_1, x_2, \ldots)$ consists of IID Bernoulli -draws with unknown probability $p^* \in (0,1)$. Agent $i$ holds a -Beta prior: +draws with unknown probability $p^* \in (0,1)$. + +Agent $i$ has a Beta prior: $$ p \sim \mathrm{Beta}(\alpha_i, \beta_i), \qquad i = 1, 2. 
@@ -373,9 +388,11 @@ that $P_1 \sim P_2$ for any Beta priors with positive parameters. ### The exact Blackwell–Dubins distance For the Beta–Bernoulli model, there is a clean formula for $d_n$. + By de Finetti's theorem, each agent's conditional distribution of the *future infinite sequence* given the past is a mixture of IID Bernoulli$(p)$ processes, where $p$ is drawn from the posterior Beta distribution. + Since the Bernoulli$(p)^{\infty}$ measures for different $p$ are mutually singular (the empirical frequency identifies $p$ exactly), the TV distance between the two conditional distributions over the future equals the TV @@ -494,7 +511,8 @@ whose true probability of heads is $p^* = 0.65$. $\hat{p}_2^0 = 8/9 \approx 0.89$. Both priors are supported on all of $(0,1)$, so $P_1 \sim P_2$. -Blackwell–Dubins guarantees merging; the question is only how fast. + +Blackwell–Dubins guarantees merging; the question is how fast. ```{code-cell} ipython3 # ------------------------------------------------------------------------- @@ -657,6 +675,7 @@ expected values. ## The Supermartingale Property of $d_n$ The proof relies on $\{d_n\}$ being a non-negative supermartingale. + We can verify this numerically by checking that $d_n$ tends to decrease over time and that its conditional expectation does not increase. @@ -709,7 +728,9 @@ supermartingale property $\mathbb{E}_Q[d_{n+1}\,|\,\mathscr{F}_n] \leq d_n$. ## Failure of Merging: Mutual Singularity What happens when the hypothesis of the theorem fails — that is, when -$P \not\ll Q$? The answer is both instructive and stark. +$P \not\ll Q$? + +The answer is both instructive and stark. ### Point-mass priors @@ -796,7 +817,9 @@ plt.show() The contrast is vivid. With mutually absolutely continuous priors (blue), the total-variation distance decays to zero as Blackwell–Dubins guarantees. With mutually singular point-mass priors (red), the distance stays -permanently at $|p_P - p_Q| = 0.45$. 
More data never resolves the +permanently at $|p_P - p_Q| = 0.45$. + +More data never resolves the disagreement — the two agents are committed to models that are separated by events they each regard as having probability zero under the other's measure. @@ -1034,8 +1057,10 @@ $$ \int_0^\infty \theta_s^2\,ds < \infty \quad Q\text{-a.s.} $$ -Intuitively, this says the total amount of information that separates the -two measures over the infinite horizon is finite. When $\theta$ is a +Informally, this says the total amount of information that separates the +two measures over the infinite horizon is finite. + +When $\theta$ is a non-zero constant — meaning the two agents predict permanently different drifts — the energy condition fails, $P \perp Q$, and merging cannot occur. @@ -1051,11 +1076,14 @@ forces $d_\infty = 0$. ### Bayesian learning -The most direct application is to Bayesian inference. Suppose data +The most direct application is to Bayesian inference. + +Suppose data $(x_1, x_2, \ldots)$ are drawn from the true measure $Q^*$. An agent holds a prior $\pi$ over a family $\{Q_\theta : \theta \in \Theta\}$, -inducing a marginal $P = \int Q_\theta\,\pi(d\theta)$. If $P \ll Q^*$ -(equivalently: the prior assigns positive probability to every +inducing a marginal $P = \int Q_\theta\,\pi(d\theta)$. + +If $P \ll Q^*$ (equivalently: the prior assigns positive probability to every neighbourhood of the true model), then by Blackwell–Dubins, $$ @@ -1069,7 +1097,9 @@ as long as the prior is in the right absolute-continuity class. {cite:t}`diaconis1986` establish that absolute continuity of the prior with respect to the truth is not just sufficient but essentially *necessary* -for Doob consistency. When $P \perp Q^*$, there exist events of probability +for Doob consistency. + +When $P \perp Q^*$, there exist events of probability one under $Q^*$ that have probability zero under $P$, so the agent maintains fundamentally wrong beliefs forever. 
@@ -1077,7 +1107,9 @@ fundamentally wrong beliefs forever. In macroeconomics, the **common prior assumption** embedded in rational expectations models requires all agents to agree on the probability model -for the economy. Blackwell–Dubins provides a dynamic justification: if +for the economy. + +Blackwell–Dubins provides a dynamic justification: if two agents start with heterogeneous but mutually absolutely continuous priors and observe a common history, their conditional forecasts will eventually agree on every event, even if they never explicitly coordinate @@ -1085,6 +1117,8 @@ their beliefs. {cite:t}`aumann1976`'s **agreement theorem** strengthens this: agents with a common prior cannot "agree to disagree" on posterior probabilities. + + Blackwell–Dubins complements Aumann by showing that even without a common prior, merging occurs eventually if the initial priors are equivalent. @@ -1092,7 +1126,9 @@ prior, merging occurs eventually if the initial priors are equivalent. For a Markov chain with transition kernel $\Pi$ and two initial distributions $\mu$ and $\nu$, the $n$-step distributions are $\mu\Pi^n$ -and $\nu\Pi^n$. If $\Pi$ is ergodic with unique stationary distribution +and $\nu\Pi^n$. + +If $\Pi$ is ergodic with unique stationary distribution $\pi$, both converge to $\pi$, so $$ @@ -1103,7 +1139,9 @@ $$ This is a special form of merging that does *not* require absolute continuity, because ergodicity already forces both distributions to the same -limit. Blackwell–Dubins is the appropriate generalisation for +limit. + +Blackwell–Dubins is the appropriate generalisation for **non-ergodic** or **non-Markovian** processes, where no single invariant measure exists and the operative condition is absolute continuity of the initial priors. @@ -1112,7 +1150,9 @@ initial priors. ## The Rate of Merging Blackwell–Dubins gives only almost-sure convergence; it says nothing about -*how fast* $d_n \to 0$. The bound +*how fast* $d_n \to 0$. 
+ +The bound $$ \mathbb{E}_Q[d_n] \leq \tfrac{1}{2}\,\mathbb{E}_Q[|Z_n - Z_\infty|] @@ -1123,8 +1163,9 @@ rate of the likelihood ratio martingale. For parametric Bayesian models, the posterior contracts at the $n^{-1/2}$ rate (Bernstein–von Mises theorem), which implies -$d_n = O(n^{-1/2})$ in expectation. The following figure illustrates -this for our Beta–Bernoulli model. +$d_n = O(n^{-1/2})$ in expectation. + +The following figure illustrates this for our Beta–Bernoulli model. ```{code-cell} ipython3 # ------------------------------------------------------------------------- @@ -1172,7 +1213,7 @@ for this parametric model on any single sample path. ## Summary -The main logical chain underlying the Blackwell–Dubins theorem is: +The logical flow underlying the Blackwell–Dubins theorem is: $$ P \ll Q @@ -1208,4 +1249,112 @@ Key takeaways: 5. **There is a sharp dichotomy**: either $P \sim Q$ (merging) or $P \perp Q$ (permanent disagreement). There is no middle ground. +## Applications in Economics + +{cite}`KalaiLehrer1993Nash` apply the Blackwell--Dubins theorem to show that +rational Bayesian learning in an infinitely repeated game leads play to +resemble a Nash equilibrium. + +If each player's prior is absolutely continuous +with respect to the true distribution of opponents' strategies, beliefs merge +with the truth and best responses converge to approximate Nash behavior. + +This paper is a primary conduit through which Blackwell--Dubins entered mainstream +economic theory. + +{cite}`KalaiLehrer1993Subjective` introduce the notion of a subjective +equilibrium in repeated games and use the merging result to establish that +subjective and objective equilibria coincide in the long run under the same +absolute continuity condition. 
+
+{cite}`KalaiLehrer1994Merging` extend the Blackwell--Dubins framework by
+introducing weaker notions of merging---weak and strong---suited to studying
+convergence to equilibrium in infinite games and dynamic economies where
+full total-variation merging is too demanding.
+
+{cite}`KalaiLehrerSmorodinsky1999` connect merging of opinions to the theory
+of calibrated forecasting, showing that a forecaster whose priors satisfy an
+absolute-continuity condition relative to the true process will be calibrated
+in the long run.
+
+
+{cite}`JacksonKalaiSmorodinsky1999` revisit de Finetti's representation theorem
+through the lens of Bayesian learning.
+
+They show that when agents learn about
+a stochastic process by observing its realizations, the beliefs that emerge
+have a de Finetti-style exchangeability structure, with the Blackwell--Dubins
+theorem playing a central role in establishing convergence of posterior
+representations.
+
+{cite}`JacksonKalai1999` study a model of recurring games in which successive
+cohorts of players observe the history of earlier play. Using the
+rational-learning machinery of Kalai--Lehrer together with Blackwell--Dubins,
+they show that reputational effects that can sustain non-Nash behavior in an
+isolated group dissipate over time as social learning spreads through the
+population.
+
+
+{cite}`Sandroni1998Nash` shows that the absolute continuity condition required
+by Blackwell--Dubins and by Kalai--Lehrer can be weakened to *almost* absolute
+continuity---a condition under which Nash convergence of learning still holds,
+broadening the scope of the rational-learning program.
+
+{cite}`Sandroni2000` provides an alternative proof of the Blackwell--Dubins
+theorem that makes the role of absolute continuity transparent.
+ +The paper +argues that *persistent disagreement*---the negation of merging---implies the +existence of mutually favorable bets on which each agent is certain to profit +on average, a violation of absolute continuity. + +The analysis raises questions +about the economic relevance of the merging result by clarifying just how +strong the absolute continuity hypothesis is. + + +{cite}`LehrerSmorodinsky1996Compatible` characterize the class of +*compatible* pairs of measures---those for which some form of merging +obtains---going beyond the sufficient condition of absolute continuity +used by Blackwell and Dubins. + +{cite}`LehrerSmorodinsky1996Learning` survey the relationship between merging +of opinions and learning in repeated strategic environments, collected in a +volume honoring David Blackwell. + + +{cite}`Nyarko1994` uses Blackwell--Dubins to prove that Bayesian learning in +an infinitely repeated normal-form game leads beliefs and empirical +distributions to converge to a correlated equilibrium of the true game, +under the absolute continuity of priors. + +{cite}`PomattoAlNajjarSandroni2014` extend the Blackwell--Dubins theorem to +the class of finitely additive (Savagean) probability measures. + +They show +that the theorem holds for extreme points of the set of measures compatible +with a given prior, and they exploit this characterization to study when +statistical tests of forecasting ability can be manipulated---connecting +the merging and testing literatures. + +{cite}`AcemogluChernozhukovYildiz2016` identify a fragility in the +Blackwell--Dubins consensus result. + +When agents are uncertain not just about +an underlying parameter but also about the mapping from the parameter to signal +distributions, absolute continuity of priors is no longer sufficient for +asymptotic agreement. + +Even with identical support, agents may disagree +forever, providing a Bayesian foundation for persistent heterogeneous beliefs. 
+ +## A Key Companion Paper from Probability + +{cite}`DiaconisFreedman1986` establish consistency of Bayes estimates under +misspecification, a result in the same intellectual tradition as +Blackwell--Dubins. + +It is routinely co-cited with the merging theorem in the +economics learning literature as providing the probabilistic underpinning for +Bayesian consistency. diff --git a/lectures/organization_capital.md b/lectures/organization_capital.md new file mode 100644 index 000000000..efda3e91e --- /dev/null +++ b/lectures/organization_capital.md @@ -0,0 +1,1007 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.11.1 +kernelspec: + display_name: Python 3 + language: python + name: python3 +--- + +# Organization Capital + +```{index} single: Organization Capital +``` + +## Overview + +This lecture describes a theory of **organization capital** proposed by +{cite}`Prescott_Visscher_1980`. + +Prescott and Visscher define organization capital as information that a firm accumulates +about its employees, teams, and production processes. + +This information is an **asset** to the firm because it affects the production possibility set +and is produced jointly with output. + +Costs of adjusting the stock of organization capital constrain the firm's growth rate, +providing an explanation for + +1. why firm growth rates are independent of firm size (Gibrat's Law) +1. 
why adjustment costs for rapid growth arise endogenously rather than being assumed + +The paper offers three examples of organization capital: + +* **Personnel information**: knowledge about the match between workers and tasks +* **Team information**: knowledge about how well groups of workers mesh +* **Firm-specific human capital**: skills of employees enhanced by on-the-job training + +In each case, the investment possibilities lead firms to grow at a common rate, +yielding constant returns to scale together with increasing costs of rapid size adjustment. + +```{note} +The theory is related to ideas of {cite}`Coase_1937` and {cite}`Williamson_1975` about the nature of the firm. +Prescott and Visscher stress the firm's role as a storehouse of information and argue that +incentives within the firm are created for efficient accumulation and use of that information. +``` + +Let's start with some imports: + +```{code-cell} ipython3 +import numpy as np +import matplotlib.pyplot as plt +from scipy.stats import norm +from scipy.optimize import brentq +``` + +## The Basic Idea + +The firm is a storehouse of information. + +Within the firm, incentives are created for the efficient accumulation and use of that information. + +Prescott and Visscher exploit this concept to explain certain facts about firm growth and +size distribution. + +The key insight: the process by which information is accumulated naturally leads to + +1. **constant returns to scale**, and +2. **increasing costs to rapid firm size adjustment** + +Constant returns to scale explain the absence of an observed unique optimum firm size +(see {cite}`Stigler_1958`). + +Without costs of adjustment, the pattern of investment +by firms in the face of a change in market demand would exhibit +discontinuities we do not observe. 
+ +Further, without a cost penalty to rapid growth, the first firm to +discover a previously untapped market would preempt competition by +usurping all profitable investments as they appear, thus implying +monopoly more prevalent than it is. + + +## Personnel Information as Organization Capital + +```{index} single: Organization Capital; Personnel Information +``` + +The first example of organization capital is information about the +match between workers and tasks. + +### Setup + +Workers have different sets of skills and talents. + +A variable $\theta$ measures the aptitude of a worker for a particular kind of work. + +* Workers with high $\theta$ have comparative advantage in tasks requiring repeated attention to detail +* Workers with low $\theta$ have comparative advantage in work requiring broadly defined duties + +The population distribution of $\theta$ is normal with mean zero and precision (inverse of variance) $\pi$: + +$$ +\theta \sim N(0, 1/\pi) +$$ + +When a worker is hired from the labor pool, neither the worker nor the employer knows $\theta$. +Both know only the population distribution. + +### Three Tasks + +If $q$ units of output are produced, assume: + +* $\varphi_1 q$ workers are assigned to **task 1** (screening) +* $\varphi q$ workers are assigned to **task 2** +* the remaining workers are assigned to **task 3** + +where $\varphi_1 + 2\varphi = 1$. + +```{note} +The fixed coefficients technology requires a constant ratio between the number of +personnel in jobs 2 and 3 and the number assigned to job 1. +``` + +For task 1, the screening task, per unit cost of production is **invariant** to the $\theta$-values of the individuals assigned. + +However, the larger a worker's $\theta$, the larger is his product in task 2 relative to +his product in task 3. 
+ +Consequently: + +* a worker with a highly positive $\theta$ is much better suited for task 2 +* a worker with a highly negative $\theta$ is much better suited for task 3 + +### Bayesian Learning + +Performance in tasks 2 or 3 cannot be observed at the individual level. + +But information about a worker's $\theta$-value can be obtained from observing +performance in task 1, the screening task. + +The expert supervising the apprentice determines a value of $z$ each period: + +$$ +z_{it} = \theta_i + \epsilon_{it} +$$ (eq:signal) + +where $\epsilon_{it} \sim N(0, 1)$ are independently distributed over both workers $i$ and periods $t$. + +After $n$ observations on a worker in the screening job, the **posterior distribution** of $\theta$ is normal with + +**posterior mean:** + +$$ +m = \frac{1}{\pi + n} \sum_{k=1}^{n} z_k +$$ (eq:post_mean) + +**posterior precision:** + +$$ +h = \pi + n +$$ (eq:post_prec) + +Knowledge of an individual is thus completely characterized by the pair $(m, h)$. + +```{code-cell} ipython3 +def bayesian_update(z_observations, prior_precision): + """ + Compute posterior mean and precision after observing signals. 
+ + Parameters + ---------- + z_observations : array_like + Observed signals z_1, ..., z_n + prior_precision : float + Precision π of the prior distribution + + Returns + ------- + m : float + Posterior mean + h : float + Posterior precision + """ + n = len(z_observations) + h = prior_precision + n + m = np.sum(z_observations) / h + return m, h +``` + +Let's visualize how the posterior evolves as we observe a worker whose true $\theta = 0.8$: + +```{code-cell} ipython3 +np.random.seed(42) + +# True worker type +theta_true = 0.8 + +# Prior precision +pi = 1.0 + +# Generate signals +T = 20 +epsilons = np.random.randn(T) +z_signals = theta_true + epsilons + +# Track posterior evolution +posterior_means = [] +posterior_stds = [] + +for n in range(1, T + 1): + m, h = bayesian_update(z_signals[:n], pi) + posterior_means.append(m) + posterior_stds.append(1 / np.sqrt(h)) + +fig, axes = plt.subplots(1, 2, figsize=(12, 5)) + +# Plot posterior mean convergence +ax = axes[0] +ax.plot(range(1, T + 1), posterior_means, 'b-o', markersize=4, + label='Posterior mean $m$') +ax.axhline(theta_true, color='r', linestyle='--', + label=fr'True $\theta = {theta_true}$') +ax.set_xlabel('Number of observations $n$') +ax.set_ylabel('Posterior mean $m$') +ax.set_title('Convergence of Posterior Mean') +ax.legend() + +# Plot posterior standard deviation +ax = axes[1] +ax.plot(range(1, T + 1), posterior_stds, 'g-o', markersize=4, + label='Posterior std $1/\sqrt{h}$') +ax.set_xlabel('Number of observations $n$') +ax.set_ylabel('Posterior standard deviation') +ax.set_title('Shrinking Posterior Uncertainty') +ax.legend() + +plt.tight_layout() +plt.show() +``` + +As the number of screening observations $n$ increases, the posterior mean converges +to the true $\theta$, and the posterior uncertainty shrinks at rate $1/\sqrt{n}$. 

### Per Unit Costs of Production

Under the nonsequential assignment rule, employees with the greatest seniority
are assigned to jobs 2 and 3, while newer employees remain in the screening task.

Workers with $m > 0$ are assigned to task 2, and those with $m \leq 0$ to task 3.

Per unit costs of production, assuming this assignment after $n$ screening periods, are:

$$
c(n) = c_1 + c_2 + c_3 - E\{\theta \mid m > 0\} + E\{\theta \mid m \leq 0\}
$$ (eq:unit_cost)

Because $m$ is normally distributed, evaluation of the conditional expectation in
{eq}`eq:unit_cost` yields per unit costs as a function of $n$:

$$
c(n) = c - 0.7978 \frac{n}{\pi(\pi + n)}
$$ (eq:cost_n)

where $c = c_1 + c_2 + c_3$ and $0.7978 = 2 \int_0^{\infty} \frac{t}{\sqrt{2\pi}} e^{-t^2/2} dt$.

```{note}
The constant $0.7978 \approx \sqrt{2/\pi}$ is the mean of the standard half-normal distribution.
It arises from computing $E[\theta \mid m > 0] - E[\theta \mid m \leq 0]$ for a normal distribution.
```

The function $c(n)$ decreases at a **decreasing rate** in $n$: more screening observations
reduce costs but with diminishing returns.

```{code-cell} ipython3
def cost_per_unit(n_vals, pi, c_bar=1.0):
    """
    Per unit cost of production as a function of screening periods n.
+ + Parameters + ---------- + n_vals : array_like + Number of screening periods + pi : float + Prior precision + c_bar : float + Base cost c = c1 + c2 + c3 + + Returns + ------- + costs : array + Per unit costs c(n) + """ + n_vals = np.asarray(n_vals, dtype=float) + return c_bar - 0.7978 * n_vals / (pi * (pi + n_vals)) + + +fig, ax = plt.subplots(figsize=(10, 6)) + +n_vals = np.linspace(0.1, 50, 200) + +for pi in [0.5, 1.0, 2.0, 5.0]: + costs = cost_per_unit(n_vals, pi) + ax.plot(n_vals, costs, label=fr'$\pi = {pi}$') + +ax.set_xlabel('Screening periods $n$') +ax.set_ylabel('Per unit cost $c(n)$') +ax.set_title('Per Unit Costs Decrease with Screening Time') +ax.legend() +ax.set_xlim(0, 50) +plt.tight_layout() +plt.show() +``` + +The figure shows that: + +* costs decrease with more screening time $n$ +* the decrease is at a declining rate (diminishing returns to screening) +* for smaller prior precision $\pi$ (more initial uncertainty about worker types), the gains from screening are larger + +This diminishing-returns structure is the source of the **increasing costs of rapid adjustment**. + + +### Growth Rate and Screening Time + +The greater the growth rate, the smaller must be $n$ --- the time spent in the screening +task before assignment to job 2 or 3. + +If $\gamma$ is the growth rate of output and $\rho$ is the quit rate, and $y_i$ is the current number +of vintage $i$ employees, then + +$$ +(1 + \gamma) y_{i+1} = (1 - \rho) y_i +$$ + +Letting $\xi = (1 - \rho)/(1 + \gamma)$, from the above $y_i = \xi^i y_0$. 
+ +For the fixed coefficients technology, the fraction of present personnel with vintage +greater than $n$ must equal $2\varphi / (\varphi_1 + 2\varphi)$, which gives: + +$$ +\xi^{n+1} = \frac{2\varphi}{\varphi_1 + 2\varphi} +$$ (eq:cutoff) + +Solving for $n$ as a function of $\gamma$: + +$$ +n(\gamma) = \frac{\log(2\varphi) - \log(\varphi_1 + 2\varphi)}{\log(1 - \rho) - \log(1 + \gamma)} - 1 \quad \text{for } \gamma > -\rho +$$ (eq:n_gamma) + +```{code-cell} ipython3 +def screening_time(gamma, rho, phi1, phi): + """ + Compute the screening time n as a function of growth rate γ. + + Parameters + ---------- + gamma : array_like + Growth rate of output + rho : float + Quit rate + phi1 : float + Fraction of workers in task 1 per unit output + phi : float + Fraction of workers in each of tasks 2, 3 per unit output + + Returns + ------- + n : array + Screening periods before assignment + """ + gamma = np.asarray(gamma, dtype=float) + numerator = np.log(2 * phi) - np.log(phi1 + 2 * phi) + denominator = np.log(1 - rho) - np.log(1 + gamma) + return numerator / denominator - 1 + + +# Parameters +rho = 0.1 # quit rate +phi1 = 0.5 # fraction in screening +phi = 0.25 # fraction in each of tasks 2, 3 + +gamma_vals = np.linspace(-0.05, 0.30, 200) + +# Filter valid range: γ > -ρ and ensure n > 0 +valid = gamma_vals > -rho +gamma_valid = gamma_vals[valid] +n_vals = screening_time(gamma_valid, rho, phi1, phi) +# Only keep non-negative n +mask = n_vals > 0 +gamma_plot = gamma_valid[mask] +n_plot = n_vals[mask] + +fig, ax = plt.subplots(figsize=(10, 6)) +ax.plot(gamma_plot, n_plot, 'b-', linewidth=2) +ax.set_xlabel(r'Growth rate $\gamma$') +ax.set_ylabel(r'Screening periods $n(\gamma)$') +ax.set_title('Faster Growth Means Less Screening Time') +ax.set_xlim(gamma_plot[0], gamma_plot[-1]) +plt.tight_layout() +plt.show() +``` + +The figure shows the key trade-off: **faster growth forces shorter screening periods**. 
+ +When growth is rapid, new workers must be promoted from the screening task to +productive tasks more quickly, so less information is gathered about each worker +before assignment. + + +### Combined Effect: Growth Rate and Per Unit Costs + +Composing the functions $c(n)$ and $n(\gamma)$ reveals how per unit costs depend on the +growth rate: + +```{code-cell} ipython3 +fig, ax = plt.subplots(figsize=(10, 6)) + +pi = 1.0 +c_bar = 1.0 + +# Compute per unit costs as function of growth rate +n_of_gamma = screening_time(gamma_plot, rho, phi1, phi) +costs_of_gamma = cost_per_unit(n_of_gamma, pi, c_bar) + +ax.plot(gamma_plot, costs_of_gamma, 'r-', linewidth=2) +ax.set_xlabel(r'Growth rate $\gamma$') +ax.set_ylabel(r'Per unit cost $c(n(\gamma))$') +ax.set_title('Per Unit Costs Increase with Growth Rate') +ax.set_xlim(gamma_plot[0], gamma_plot[-1]) +plt.tight_layout() +plt.show() +``` + +This establishes the key result: **increasing costs of rapid adjustment arise endogenously** +from the trade-off between screening and growth. + +The faster the firm grows, the less time it has to screen workers, the poorer the +match between workers and tasks, and the higher the per unit production costs. + + +## Industry Equilibrium + +```{index} single: Organization Capital; Industry Equilibrium +``` + +Firm growth rates are independent of firm size in this model because the +mathematical structure of the technology constraint is the same as that +considered in {cite}`lucas1967adjustment`, except that the stock of organization capital +is a vector rather than a scalar. + +The technology set facing price-taking firms is a **convex cone**: there are +constant returns to scale. + +Constant returns and internal adjustment costs, along with some costs of +transferring capital between firms, yield an optimum rate of firm growth +**independent of the firm's size** --- this is Gibrat's Law. 
+ +The bounded, downward-sloping, inverse industry demand function is + +$$ +P_t = p(Q_t, u_t) +$$ + +where $Q_t$ is the sum of output over all firms and $u_t$ is a demand shock +subject to a stationary Markov process. + +Prescott and Visscher show that a competitive equilibrium exists using the +framework of {cite}`Lucas_Prescott_1971`. + +The discounted consumer surplus to be maximized is + +$$ +\sum_{t=0}^{\infty} \beta^t \left\{ \int_0^{Q_t} p(y, u_t) dy - Bw - Q_t \sum_i (A_{i2t} + A_{i3t}) c(i) / \sum_i (A_{i2t} + A_{i3t}) \right\} +$$ (eq:surplus) + +where $A_{i2t}, A_{i3t}$, and $B$ are obtained by summing $a_{i2t}$, $a_{i3t}$, and $b$, +respectively, over all firms in the industry. + + +### Key Property: Growth Rates Independent of Size + +If two firms have organization capital vectors $\underline{k}$ that are proportional at a point in time, +they will be proportional in all future periods. + +That is, **growth rates are independent of firm size**. + +```{code-cell} ipython3 +def simulate_firm_growth(T, gamma, rho, q0, seed=42): + """ + Simulate firm output growth with constant growth rate + and stochastic quit turnover. 
+ + Parameters + ---------- + T : int + Number of periods + gamma : float + Equilibrium growth rate + rho : float + Quit rate + q0 : float + Initial output + seed : int + Random seed + + Returns + ------- + output : array + Firm output path + """ + rng = np.random.default_rng(seed) + output = np.zeros(T) + output[0] = q0 + for t in range(1, T): + # Stochastic growth around equilibrium rate + shock = rng.normal(0, 0.02) + output[t] = output[t-1] * (1 + gamma + shock) + return output + + +T = 50 +gamma_eq = 0.05 # equilibrium growth rate +rho = 0.1 + +# Simulate firms of different initial sizes +fig, axes = plt.subplots(1, 2, figsize=(14, 5)) + +# Level plots +ax = axes[0] +for q0, label in [(10, 'Small firm'), (50, 'Medium firm'), + (200, 'Large firm')]: + output = simulate_firm_growth(T, gamma_eq, rho, q0, + seed=int(q0)) + ax.plot(range(T), output, label=f'{label} ($q_0={q0}$)') +ax.set_xlabel('Period') +ax.set_ylabel('Output $q_t$') +ax.set_title('Firm Output Levels') +ax.legend() + +# Log plots (growth rates) +ax = axes[1] +for q0, label in [(10, 'Small firm'), (50, 'Medium firm'), + (200, 'Large firm')]: + output = simulate_firm_growth(T, gamma_eq, rho, q0, + seed=int(q0)) + ax.plot(range(T), np.log(output), label=f'{label} ($q_0={q0}$)') +ax.set_xlabel('Period') +ax.set_ylabel(r'$\log(q_t)$') +ax.set_title('Log Output (Parallel = Equal Growth Rates)') +ax.legend() + +plt.tight_layout() +plt.show() +``` + +The right panel shows that all firms grow at the same rate regardless of initial size --- +the log output paths are parallel. + +This is **Gibrat's Law**: growth rates are independent of firm size. + +## Bayesian Screening Simulation + +```{index} single: Organization Capital; Bayesian Screening +``` + +Let's simulate the full screening and assignment process for a single firm. + +We draw workers from the population, observe their signals in the screening task, +and then assign them to the appropriate productive task based on the posterior mean. 
+ +```{code-cell} ipython3 +def simulate_screening(n_workers, n_screen, pi, seed=123): + """ + Simulate the screening and assignment of workers. + + Parameters + ---------- + n_workers : int + Number of workers to screen + n_screen : int + Number of screening periods per worker + pi : float + Prior precision of θ distribution + seed : int + Random seed + + Returns + ------- + results : dict + Dictionary with θ values, posterior means, + assignments, and misassignment rate + """ + rng = np.random.default_rng(seed) + + # Draw true worker types + theta = rng.normal(0, 1/np.sqrt(pi), n_workers) + + # Generate screening signals + signals = (theta[:, None] + + rng.normal(0, 1, (n_workers, n_screen))) + + # Compute posterior means after screening + posterior_means = signals.sum(axis=1) / (pi + n_screen) + + # Assign workers: m > 0 → task 2, m ≤ 0 → task 3 + assignment = np.where(posterior_means > 0, 2, 3) + + # Correct assignment based on true θ + correct_assignment = np.where(theta > 0, 2, 3) + + # Misassignment rate + misassignment_rate = np.mean(assignment != correct_assignment) + + return { + 'theta': theta, + 'posterior_means': posterior_means, + 'assignment': assignment, + 'correct_assignment': correct_assignment, + 'misassignment_rate': misassignment_rate + } + + +pi = 1.0 +n_workers = 5000 +screening_periods = [1, 3, 5, 10, 20, 50] + +fig, axes = plt.subplots(2, 3, figsize=(15, 10)) +axes = axes.flatten() + +misassignment_rates = [] + +for idx, n_screen in enumerate(screening_periods): + results = simulate_screening(n_workers, n_screen, pi) + misassignment_rates.append(results['misassignment_rate']) + + ax = axes[idx] + theta = results['theta'] + m = results['posterior_means'] + + # Color by whether assignment matches true type + correct = results['assignment'] == results['correct_assignment'] + ax.scatter(theta[correct], m[correct], alpha=0.1, s=5, + color='blue', label='Correct') + ax.scatter(theta[~correct], m[~correct], alpha=0.3, s=5, + color='red', 
label='Misassigned') + ax.axhline(0, color='k', linewidth=0.5) + ax.axvline(0, color='k', linewidth=0.5) + mis = results['misassignment_rate'] + ax.set_title(f'$n = {n_screen}$, misassign = {mis:.1%}') + ax.set_xlabel(r'True $\theta$') + ax.set_ylabel('Posterior mean $m$') + if idx == 0: + ax.legend(markerscale=5, loc='upper left') + +plt.tight_layout() +plt.show() +``` + +Red dots are workers who are **misassigned** --- placed in the wrong productive task +because the posterior mean had the wrong sign relative to their true $\theta$. + +As $n$ increases: +* The posterior mean $m$ becomes more strongly correlated with $\theta$ +* Misassignment rates fall + +```{code-cell} ipython3 +fig, ax = plt.subplots(figsize=(10, 6)) + +n_range = np.arange(1, 51) +mis_rates = [] +for n_screen in n_range: + results = simulate_screening(n_workers, n_screen, pi) + mis_rates.append(results['misassignment_rate']) + +ax.plot(n_range, mis_rates, 'b-o', markersize=3) +ax.set_xlabel('Screening periods $n$') +ax.set_ylabel('Misassignment rate') +ax.set_title('Misassignment Rate Decreases with Screening Time') +plt.tight_layout() +plt.show() +``` + +This confirms the theoretical prediction: the cost savings from better assignment +exhibit **diminishing returns** in the screening time $n$. + +## Team Information + +```{index} single: Organization Capital; Team Information +``` + +Personnel information need not be valuable only because it facilitates the matching of +workers to tasks. + +Another equally valuable use of personnel information is in the **matching of workers to workers**. + +What is important to performance in many activities within the firm is not just +the aptitude of an individual assigned to a task, but also how well the +characteristics of the individual mesh with those of others performing related duties. 
+ +### Structure + +Suppose workers are grouped into teams, and team $i$ assigned to a screening task +has an observed productivity indicator + +$$ +z_{it} = \theta_i + \epsilon_{it} +$$ + +where: +* $\theta_i$ is a deterministic component directly related to how well team workers are paired +* $\epsilon_{it} \sim N(0, 1)$ are i.i.d. stochastic components + +The $\theta$ from all possible teams are approximately independently and normally distributed +$N(\mu, 1/\pi)$. + +After $n$ observations on team $i$, the posterior distribution on $\theta_i$ is normal with + +$$ +m = \mu + \frac{1}{\pi + n} \sum_{k=1}^{n} (z_k - \mu) +$$ + +and precision $h = \pi + n$. + +If dissolution of a team also dissolves the accrued information, the team information +model has the **same mathematical structure** as the personnel information model. + +```{code-cell} ipython3 +def simulate_team_screening(n_teams, n_screen, pi, mu=0.5, + seed=456): + """ + Simulate team screening with Bayesian updating. + + Parameters + ---------- + n_teams : int + Number of teams to screen + n_screen : int + Number of screening periods + pi : float + Prior precision + mu : float + Prior mean of team quality + seed : int + Random seed + + Returns + ------- + results : dict + """ + rng = np.random.default_rng(seed) + + # True team qualities + theta = rng.normal(mu, 1/np.sqrt(pi), n_teams) + + # Generate signals + signals = (theta[:, None] + + rng.normal(0, 1, (n_teams, n_screen))) + + # Posterior means + z_bar = signals.mean(axis=1) + post_means = mu + n_screen * (z_bar - mu) / (pi + n_screen) + post_prec = pi + n_screen + + return { + 'theta': theta, + 'posterior_means': post_means, + 'posterior_precision': post_prec + } + + +fig, axes = plt.subplots(1, 3, figsize=(15, 5)) + +for idx, n_screen in enumerate([1, 5, 20]): + results = simulate_team_screening(500, n_screen, pi=1.0, mu=0.5) + + ax = axes[idx] + ax.scatter(results['theta'], results['posterior_means'], + alpha=0.4, s=10) + lims = [-1.5, 2.5] + 
ax.plot(lims, lims, 'r--', alpha=0.5, label='45° line') + ax.set_xlabel(r'True team quality $\theta$') + ax.set_ylabel('Posterior mean $m$') + ax.set_title(f'$n = {n_screen}$ screening periods') + ax.set_xlim(lims) + ax.set_ylim(lims) + ax.legend() + ax.set_aspect('equal') + +plt.tight_layout() +plt.show() +``` + +As with individual screening, more observations improve the precision of team quality +estimates. +Rapid growth forces fewer observations before team assignments must be finalized, leading +to higher costs. + + +## Firm-Specific Human Capital + +```{index} single: Organization Capital; Human Capital +``` + +The third example: organization capital consists of the **human capital** of the firm's employees. + +The capacity of the organization to function effectively as a production unit is +determined largely by the level and meshing of the skills of the employees. + +```{note} +The case for the human capital of employees being part of the capital stock of the firm +is well established (see {cite}`Becker_1975`). Productivity in the future depends on levels +of human capital in the future, but to acquire human capital for the future, a sacrifice +in real resources is required in the present. +``` + +The key features are: + +* Output and skill enhancement are **joint products** resulting from the combination of + labor inputs possessing different skill levels + +* Experienced and inexperienced workers are combined in one of several available technical + processes to generate the firm's product, and in the process, the overall competence + of the work force is improved + +* The transformation frontier between current output and future human capital is + **concave** and linearly homogeneous + +This gives the technology set the structure of a closed convex cone with a vertex at the +origin --- sufficient for optimal proportional growth by firms. 

### Concave Transformation Frontier

```{code-cell} ipython3
def transformation_frontier(q, alpha=2.0):
    """
    Concave transformation frontier between current output
    and future human capital increment.

    The frontier traces the pairs (q, dh) satisfying
    q**alpha + dh**alpha = 1.  It is concave (bowed out from the
    origin), as the theory requires, only when alpha >= 1;
    values of alpha below 1 produce a *convex* frontier.

    Parameters
    ----------
    q : array_like
        Current output (fraction of capacity), in [0, 1]
    alpha : float
        Curvature parameter; use alpha >= 1 for a concave frontier

    Returns
    -------
    hk : array
        Future human capital increment
    """
    q = np.asarray(q, dtype=float)
    return (1 - q**alpha)**(1/alpha)


fig, ax = plt.subplots(figsize=(8, 8))

q_vals = np.linspace(0, 1, 200)

# Plot only alpha >= 1: smaller values would draw a convex
# frontier, contradicting the concavity assumed by the theory.
for alpha in [1.0, 1.5, 2.0, 3.0]:
    hk = transformation_frontier(q_vals, alpha)
    ax.plot(q_vals, hk,
            label=fr'$\alpha = {alpha}$', linewidth=2)

ax.set_xlabel('Current output $q$ (fraction of capacity)')
ax.set_ylabel('Future human capital increment $\\Delta h$')
ax.set_title('Concave Transformation Frontier')
ax.legend()
ax.set_xlim(0, 1.05)
ax.set_ylim(0, 1.05)
ax.set_aspect('equal')
plt.tight_layout()
plt.show()
```

The concavity of the transformation frontier means that moving from an extremely
unbalanced bundle of production and learning activity to a more balanced bundle
entails little sacrifice.

But a workday consisting primarily of learning also has diminishing returns,
creating the cost of rapid adjustment.


## Costs of Transferring Organization Capital

```{index} single: Organization Capital; Transfer Costs
```

If there were no cost to transferring organization capital from one firm to another,
the model would not place constraints on the firm's growth rate.

Firms could then merge, divest, or pirate each other's personnel without a cost penalty
and thus produce a pattern of growth not restricted by the model.

Organization capital is **not** costlessly moved, however:

1. **Moving is disruptive**: relocating from one locale to another is disruptive to both
   employee and family

2. 
**Information is firm-specific**: the information set that makes a person productive + in one organization may not make that person as productive in another, even if both + firms produce identical output + + * Facility with a computer system at one firm + * Knowing whom to ask when problems arise + * Rapport with buyers or sellers + +These are types of organization capital in one firm that **cannot be transferred costlessly** +to another. + + +## Summary and Implications + +The Prescott-Visscher model provides a unified framework in which: + +* The firm exists as an entity because it is an efficient structure for accumulating, + storing, and using information + +* **Constant returns to scale** arise because once the best combinations of worker types + are discovered, nothing prevents the firm from replicating those combinations with + proportional gains in product + +* **Increasing adjustment costs** arise endogenously from the trade-off between + current production and investment in organization capital + +* **Gibrat's Law** --- growth rates independent of firm size --- is a natural implication + +* Large firms should have growth rates that display **less variance** than small firms + because large firms are essentially portfolios of smaller production units + +```{code-cell} ipython3 +# Illustrate the variance reduction in growth rates for large vs small firms +def simulate_growth_rate_distribution(n_firms, n_subunits, gamma, + sigma, T=100, seed=789): + """ + Simulate growth rate distributions for firms of different sizes. 
+ + Parameters + ---------- + n_firms : int + Number of firms to simulate + n_subunits : int + Number of independent subunits per firm + gamma : float + Mean growth rate + sigma : float + Std dev of growth rate per subunit + T : int + Number of periods + seed : int + Random seed + + Returns + ------- + growth_rates : array + Realized growth rates for each firm + """ + rng = np.random.default_rng(seed) + # Each firm's growth is average of n_subunit growth rates + subunit_growth = rng.normal(gamma, sigma, + (n_firms, n_subunits, T)) + firm_growth = subunit_growth.mean(axis=1) # average across subunits + # Return time-averaged growth rate for each firm + return firm_growth.mean(axis=1) + + +fig, ax = plt.subplots(figsize=(10, 6)) + +sizes = {'Small (1 unit)': 1, + 'Medium (5 units)': 5, + 'Large (20 units)': 20} + +gamma = 0.05 +sigma = 0.10 + +for label, n_sub in sizes.items(): + rates = simulate_growth_rate_distribution( + 2000, n_sub, gamma, sigma) + ax.hist(rates, bins=50, alpha=0.5, density=True, + label=f'{label}: std={rates.std():.4f}') + +ax.set_xlabel('Average growth rate') +ax.set_ylabel('Density') +ax.set_title('Growth Rate Distributions by Firm Size') +ax.legend() +ax.axvline(gamma, color='k', linestyle='--', + label=r'$\gamma$', alpha=0.5) +plt.tight_layout() +plt.show() +``` + +The figure shows that although all firms have the **same mean growth rate** (Gibrat's Law), +large firms display **less variance** in realized growth rates because they are effectively +portfolios of independent subunits. + +This is consistent with the empirical findings of {cite}`Mansfield_1962` and {cite}`Hymer_Pashigian_1962`. + +The essence of the Prescott-Visscher theory is that the nature of the firm is tied to +**organization capital**. + +What distinguishes the firm from other relationships is that it is a structure within which +agents have the incentive to acquire and reveal information in a manner that is less +costly than in possible alternative institutions. 
+ From 85df95e2f7f459a406dd9426bbed3546d4c37b9e Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Thu, 2 Apr 2026 10:07:20 +1100 Subject: [PATCH 06/20] updates --- lectures/merging_of_opinions.md | 803 ++++++++++++++------------------ 1 file changed, 359 insertions(+), 444 deletions(-) diff --git a/lectures/merging_of_opinions.md b/lectures/merging_of_opinions.md index 10c5ef1f9..ece4a567b 100644 --- a/lectures/merging_of_opinions.md +++ b/lectures/merging_of_opinions.md @@ -28,39 +28,29 @@ kernelspec: ## Overview -This lecture studies the **merging-of-opinions theorem** of {cite:t}`blackwell1962`. +This lecture studies the merging-of-opinions theorem of {cite:t}`blackwell1962`. -The theorem answers a central question in Bayesian epistemology and statistical decision theory: +The theorem asks a simple question: > If two agents hold different prior beliefs about a stochastic process but observe the same stream of data indefinitely, will their probability assessments eventually converge? -The answer is a striking affirmative. Provided the two probability measures are -**mutually absolutely continuous** — each assigns positive probability to every -event the other considers possible — their conditional forecasts about all future -events merge to zero total-variation distance, *almost surely*. - -This result is connected to several other important ideas: - -- **Bayesian consistency**: a Bayesian agent's posterior predictions converge to - the truth whenever the prior assigns positive probability to the true model - ({doc}`likelihood_bayes`). -- **The rational-expectations hypothesis**: agents who disagree about the initial - model but share a common history will eventually agree on all conditional - forecasts ({cite:t}`aumann1976`). -- **Ergodic theory**: merging plays the role of ergodicity for non-Markovian - processes, forcing long-run agreement without requiring a common stationary - distribution. 
-- **Kakutani's dichotomy**: for product measures, mutual absolute continuity is - equivalent to a simple condition on Hellinger affinities, giving a clean - operational criterion for when merging is guaranteed. - -We develop the theory in discrete time, where the argument is sharpest, and -sketch the continuous-time extension. - -Throughout we use the -**Beta–Bernoulli conjugate model** as a running numerical example: two -agents observe the same stream of coin flips but start with different -priors over the coin's bias. +The answer is yes under an absolute-continuity condition. + +If $P \ll Q$, then the conditional distributions under $P$ and $Q$ over the entire future path merge in total variation, $Q$-almost surely. + +If in addition $Q \ll P$, then the same conclusion holds under both agents' probabilities. + +This result connects to several other ideas: + +- Bayesian consistency: posterior predictions approach the truth when the prior lies in the right absolute-continuity class ({doc}`likelihood_bayes`). +- Agreement results: common data can eliminate disagreement even when agents start from different priors ({cite:t}`aumann1976`). +- Kakutani's dichotomy: for product measures, equivalence versus singularity can be read from a Hellinger criterion. + +We develop the theory in discrete time and then sketch the continuous-time analogue. + +Throughout, we use the Beta–Bernoulli model as a running example. + +Two agents observe the same stream of coin flips but start from different priors over the coin's bias. Let us start with some imports. @@ -69,16 +59,14 @@ import numpy as np import matplotlib.pyplot as plt from scipy.stats import beta as beta_dist from scipy.special import betaln - -np.random.seed(42) ``` -## Probability Measures on Sequence Spaces +## Probability measures on sequence spaces ### The sequence space and its filtration -Let $(S, \mathscr{S})$ be a measurable space (the *signal space*). 
+Let $(S, \mathscr{S})$ be a measurable space, called the signal space. Set $\Omega = S^{\mathbb{N}}$, the set of all infinite sequences $\omega = (x_1, x_2, \ldots)$ with $x_n \in S$, equipped with the product $\sigma$-algebra $\mathscr{F} = \mathscr{S}^{\otimes \mathbb{N}}$. @@ -100,32 +88,29 @@ restrictions to the history up to time $n$. ### Absolute continuity -```{admonition} Definition -:class: tip -**Absolute continuity.** -$P$ is *absolutely continuous* with respect to $Q$, written $P \ll Q$, if +```{prf:definition} Absolute Continuity +:label: absolute_continuity + +$P$ is **absolutely continuous** with respect to $Q$, written $P \ll Q$, if $Q(A) = 0$ implies $P(A) = 0$ for every $A \in \mathscr{F}$. -They are *mutually absolutely continuous* (or *equivalent*), written $P \sim Q$, +They are **mutually absolutely continuous**, or **equivalent**, written $P \sim Q$, if both $P \ll Q$ and $Q \ll P$. -$P$ is *locally absolutely continuous* with respect to $Q$ if $P_n \ll Q_n$ +$P$ is **locally absolutely continuous** with respect to $Q$ if $P_n \ll Q_n$ for every $n \geq 1$. Global absolute continuity $P \ll Q$ implies local absolute continuity, but not conversely. ``` -Mutual absolute continuity has a natural interpretation: $P \sim Q$ means the two -agents agree on which individual events are *possible*. +Mutual absolute continuity means the two agents agree on which events are *possible*. -They can disagree about -how *likely* those events are, but neither agent considers an event impossible -that the other considers possible. +They can disagree about probabilities, but neither agent rules out an event the other deems possible. 
### Total variation distance -```{admonition} Definition -:class: tip -**Total variation distance.** +```{prf:definition} Total Variation Distance +:label: total_variation_distance + For two probability measures $\mu$ and $\nu$ on $(E, \mathscr{E})$, $$ @@ -145,26 +130,22 @@ $$ \|\mu - \nu\|_{\mathrm{TV}} = \mathbb{E}_\nu[(f-1)^+] = 1 - \mathbb{E}_\nu[\min(f,1)]. $$ -The total variation distance is the **strongest** notion of convergence of -probability measures. If two probability measures are close in total variation, -they are close in every possible statistical sense: their probabilities of any -event differ by at most $\|\mu - \nu\|_{\mathrm{TV}}$. +Total variation is one of the strongest standard notions of distance between probability measures. + +If two measures are close in total variation, then their probabilities of every event are close. ### The merging question -The Blackwell–Dubins theorem is about the conditional distributions of the -**future** given the **past**. +The Blackwell–Dubins theorem studies the conditional distribution of the *future* given the *past*. -At time $n$, after observing $(x_1,\ldots,x_n)$, -each agent forms a conditional distribution over all future events: +At time $n$, after observing $(x_1,\ldots,x_n)$, each agent forms a conditional distribution over all future events: $$ P(\,\cdot\,|\,\mathscr{F}_n)(\omega), \qquad Q(\,\cdot\,|\,\mathscr{F}_n)(\omega). $$ -These are probability measures on the whole future path, not just the next -observation. +These are probability measures on the whole future path, not just the next observation. The merging question asks whether @@ -176,14 +157,14 @@ $$ almost surely as $n \to \infty$. -## The Likelihood-Ratio Martingale +## The likelihood-ratio martingale -The main mathematical tool is the **Radon–Nikodym derivative process**. +Our main tool is the Radon–Nikodym derivative process. 
### The likelihood ratio Since $P \ll Q$ implies $P_n \ll Q_n$ for every $n$, the Radon–Nikodym -theorem guarantees the existence of the **likelihood ratio** +theorem guarantees the existence of the likelihood ratio $$ Z_n = \frac{dP_n}{dQ_n}, \qquad Z_n \geq 0 \;\; Q\text{-a.s.}, @@ -198,11 +179,12 @@ $$ Z_n = \mathbb{E}_Q[Z \,|\, \mathscr{F}_n] \qquad Q\text{-a.s.} $$ -That is, $\{Z_n, \mathscr{F}_n\}_{n \geq 1}$ is a **non-negative, uniformly -integrable $Q$-martingale**. +That is, $\{Z_n, \mathscr{F}_n\}_{n \geq 1}$ is a non-negative, uniformly +integrable $Q$-martingale. + +```{prf:lemma} Martingale Convergence +:label: martingale_convergence -```{admonition} Lemma (Martingale convergence) -:class: note The likelihood-ratio process $\{Z_n\}$ satisfies: 1. $Z_n \to Z_\infty$ $Q$-almost surely as $n \to \infty$. @@ -218,8 +200,7 @@ $L^1(Q)$ convergence. $\square$ ### Connecting conditional measures to the likelihood ratio -The following identity is the key bridge between the likelihood ratio and the -conditional distributions. +The following identity connects the likelihood ratio to the conditional distributions. On the set $\{Z_n > 0\}$, the Radon–Nikodym derivative of $P(\,\cdot\,|\,\mathscr{F}_n)$ with respect to $Q(\,\cdot\,|\,\mathscr{F}_n)$ @@ -247,15 +228,14 @@ $$ 2\,\mathbb{E}_Q[d_n] \;\leq\; \mathbb{E}_Q[|Z_\infty - Z_n|], $$ -so the $L^1$ convergence of the martingale directly controls the rate at -which the total variation distance between the two agents' conditional -forecasts goes to zero. +So the $L^1$ convergence of the martingale controls how fast the total variation distance goes to zero. + +## The Blackwell–Dubins theorem -## The Blackwell–Dubins Theorem +```{prf:theorem} Blackwell–Dubins (1962) +:label: blackwell_dubins -```{admonition} Theorem (Blackwell–Dubins, 1962) -:class: important Let $P$ and $Q$ be probability measures on $(\Omega, \mathscr{F})$ with $P \ll Q$. 
Define @@ -268,17 +248,17 @@ Then $d_n \to 0$ almost surely under $Q$ (and hence also under $P$). ### Proof ingredients -The proof has three steps, each transparent once the framework is in place. +The proof has three steps. + +Step 1. Representation of $d_n$ via $Z_n$. +As shown above, $d_n$ can be written in terms of $Z_\infty / Z_n$. + +This reduces the problem to a statement about one martingale under $Q$. -**Step 1 — Representation of $d_n$ via $Z_n$.** -As derived above, $d_n$ is expressed in terms of $Z_\infty / Z_n$ via the -conditional Bayes formula. This reduces the problem from a statement about -two different probability measures to a statement about a single martingale -under $Q$. +Step 2. $\{d_n\}$ is a $Q$-supermartingale. +Conditioning on more information reduces distinguishability on average. -**Step 2 — $\{d_n\}$ is a $Q$-supermartingale.** -Conditioning on more information cannot make the two measures easier to -distinguish; it can only make them harder. Formally, because +Formally, because $P(\,\cdot\,|\,\mathscr{F}_n) = \mathbb{E}[P(\,\cdot\,|\,\mathscr{F}_{n+1})\,|\,\mathscr{F}_n]$ and total variation is convex, @@ -286,48 +266,47 @@ $$ \mathbb{E}_Q[d_{n+1}\,|\,\mathscr{F}_n] \leq d_n \qquad Q\text{-a.s.} $$ -So $\{d_n, \mathscr{F}_n\}$ is a non-negative $Q$-supermartingale taking -values in $[0,1]$. +So $\{d_n, \mathscr{F}_n\}$ is a non-negative $Q$-supermartingale in $[0,1]$. -By Doob's supermartingale convergence theorem, $d_n \to d_\infty$ -$Q$-almost surely for some $[0,1]$-valued random variable $d_\infty$. +By Doob's theorem, $d_n \to d_\infty$ $Q$-almost surely for some $[0,1]$-valued random variable $d_\infty$. -**Step 3 — The almost-sure limit is zero.** +Step 3. The almost-sure limit is zero. From Step 1 and the $L^1$ bound: $$ \mathbb{E}_Q[d_n] \leq \tfrac{1}{2}\,\mathbb{E}_Q[|Z_\infty - Z_n|] \to 0. $$ -Hence $d_n \to 0$ in $L^1(Q)$, so $d_n \to 0$ in probability under $Q$. 
+Hence $d_n \to 0$ in $L^1(Q)$ and therefore in probability. -But $d_n \to d_\infty$ $Q$-a.s. and $d_n \to 0$ in probability together -force $d_\infty = 0$ $Q$-a.s. +Since $d_n$ already converges almost surely, its limit must satisfy $d_\infty = 0$ $Q$-a.s. -Since $P \ll Q$, every $Q$-null set is -$P$-null, so $d_n \to 0$ $P$-a.s. as well. $\square$ +Because $P \ll Q$, the same conclusion also holds $P$-almost surely. $\square$ + +```{prf:remark} One-Sided vs. Mutual Absolute Continuity +:label: one_sided_vs_mutual -```{admonition} Remark (One-sided vs. mutual absolute continuity) -:class: note The theorem requires only $P \ll Q$, not $Q \ll P$. Under one-sided absolute continuity, merging holds $Q$-a.s. (and hence -$P$-a.s.). If additionally $Q \ll P$ — that is, $P \sim Q$ — then merging +$P$-a.s.). If additionally $Q \ll P$, that is, if $P \sim Q$, then merging holds under *both* agents' measures: neither agent has a positive-probability path on which the other agent's beliefs remain permanently different. ``` -```{admonition} Remark (Tightness) -:class: note -The theorem is sharp. If $P \perp Q$ (mutual singularity), then there -exists a set $A$ with $P(A) = 1$ and $Q(A) = 0$. By Lévy's zero-one -law, $Q(A|\mathscr{F}_n) \to 0$ and $P(A|\mathscr{F}_n) \to 1$ -almost surely, so $d_n \to 1$ rather than zero. Absolute continuity is -not merely sufficient; the dichotomy between $P \ll Q$ and $P \perp Q$ -is qualitatively sharp. +```{prf:remark} Sharpness +:label: sharpness + +Absolute continuity matters. + +When $P$ and $Q$ are singular, merging can fail completely. + +The point-mass example below has $d_n = 1$ for every $n$. + +For product measures, Kakutani's theorem later gives a sharp equivalence-versus-singularity dichotomy. ``` -## The Beta–Bernoulli Model +## The Beta–Bernoulli model Before turning to Python, we introduce the main example used throughout the simulations. 
@@ -404,11 +383,10 @@ d_n - \mathrm{Beta}(\alpha_2 + k_n,\,\beta_2 + n - k_n)\bigr\|_{\mathrm{TV}}. $$ -As $k_n/n \to p^*$ and $n \to \infty$, both posterior Betas concentrate -around $p^*$ with the same variance $O(1/n)$, so $d_n \to 0$. +As $k_n/n \to p^*$ and $n \to \infty$, both posterior Betas concentrate around $p^*$ with variance of order $1/n$, so $d_n \to 0$. -## Python: Merging in Action +## Python: merging in action We set up helper functions and then run the main simulation. @@ -488,7 +466,7 @@ def run_simulation(p_true, a1, b1, a2, b2, n_steps, seed=0): pred2 = predictive_prob(a2p, b2p) tv_1step = np.abs(pred1 - pred2) - # TV between posterior Betas — the exact Blackwell-Dubins d_n + # TV between posterior Betas; in this model this equals d_n tv_beta = np.array([ tv_distance_beta(a1p[i], b1p[i], a2p[i], b2p[i]) for i in range(n_steps + 1) @@ -502,24 +480,33 @@ def run_simulation(p_true, a1, b1, a2, b2, n_steps, seed=0): ### The main merging figure -We choose two agents with very different beliefs about the bias of a coin -whose true probability of heads is $p^* = 0.65$. +We choose two agents with very different beliefs about the bias of a coin whose true probability of heads is $p^* = 0.65$. -- **Agent 1** (skeptic): prior $\mathrm{Beta}(1, 8)$, so +- Agent 1 (skeptic): prior $\mathrm{Beta}(1, 8)$, so $\hat{p}_1^0 = 1/9 \approx 0.11$. -- **Agent 2** (optimist): prior $\mathrm{Beta}(8, 1)$, so +- Agent 2 (optimist): prior $\mathrm{Beta}(8, 1)$, so $\hat{p}_2^0 = 8/9 \approx 0.89$. Both priors are supported on all of $(0,1)$, so $P_1 \sim P_2$. -Blackwell–Dubins guarantees merging; the question is how fast. +Blackwell–Dubins guarantees merging. + +The figure below shows what that merging looks like. ```{code-cell} ipython3 +--- +mystnb: + figure: + caption: | + Merging in the Beta–Bernoulli example. 
+ The four panels show posterior predictive means, the total-variation distance $d_n$, the likelihood-ratio martingale, and posterior densities at selected horizons. + name: fig-merging-of-opinions-beta-bernoulli +--- # ------------------------------------------------------------------------- # Simulation parameters # ------------------------------------------------------------------------- p_true = 0.65 -a1, b1 = 1.0, 8.0 # sceptic: prior mean = 1/9 ≈ 0.11 +a1, b1 = 1.0, 8.0 # skeptic: prior mean = 1/9 ≈ 0.11 a2, b2 = 8.0, 1.0 # optimist: prior mean = 8/9 ≈ 0.89 n_steps = 600 @@ -530,62 +517,62 @@ steps = np.arange(n_steps + 1) # Figure 1: merging of predictive distributions and TV distance # ------------------------------------------------------------------------- fig, axes = plt.subplots(2, 2, figsize=(11, 7)) -fig.suptitle("Merging of Opinions: Beta–Bernoulli Model", fontsize=13) # --- Panel (a): posterior predictive probabilities --- ax = axes[0, 0] -ax.plot(steps, sim['pred1'], color='steelblue', lw=1.5, - label=r'Agent 1 $\hat p_1^n$ (prior: sceptic)') -ax.plot(steps, sim['pred2'], color='firebrick', lw=1.5, - label=r'Agent 2 $\hat p_2^n$ (prior: optimist)') +ax.plot(steps, sim['pred1'], color='steelblue', lw=2, + label=r'Agent 1 $\hat p_1^n$ (prior: skeptic)') +ax.plot(steps, sim['pred2'], color='firebrick', lw=2, + label=r'Agent 2 $\hat p_2^n$ (prior: optimist)') ax.axhline(p_true, color='black', lw=1.0, ls='--', label=f'Truth $p^*={p_true}$') -ax.set_xlabel('Observations $n$') -ax.set_ylabel('Predictive probability') -ax.set_title('(a) Posterior predictive means') +ax.set_xlabel('observations $n$') +ax.set_ylabel('predictive probability') +ax.text(0.03, 0.93, '(a)', transform=ax.transAxes) ax.legend(fontsize=8) ax.set_ylim(0, 1) # --- Panel (b): TV distance (exact Blackwell-Dubins d_n) --- ax = axes[0, 1] -ax.semilogy(steps, sim['tv_beta'] + 1e-10, color='purple', lw=1.5) -ax.set_xlabel('Observations $n$') +ax.semilogy(steps, sim['tv_beta'] + 1e-10, 
color='mediumpurple', lw=2) +ax.set_xlabel('observations $n$') ax.set_ylabel(r'$d_n = \|P(\cdot|\mathscr{F}_n) - Q(\cdot|\mathscr{F}_n)\|_{\mathrm{TV}}$') -ax.set_title(r'(b) Total-variation distance $d_n \to 0$') +ax.text(0.03, 0.93, '(b)', transform=ax.transAxes) ax.set_ylim(bottom=1e-4) # --- Panel (c): log likelihood ratio --- ax = axes[1, 0] -ax.plot(steps, sim['log_Z'], color='darkorange', lw=1.5) +ax.plot(steps, sim['log_Z'], color='darkorange', lw=2) ax.axhline(0, color='black', lw=0.8, ls=':') -ax.set_xlabel('Observations $n$') +ax.set_xlabel('observations $n$') ax.set_ylabel(r'$\log Z_n$') -ax.set_title(r'(c) Log likelihood ratio $\log Z_n \to \log Z_\infty$') +ax.text(0.03, 0.93, '(c)', transform=ax.transAxes) # --- Panel (d): posterior Beta densities at selected epochs --- ax = axes[1, 1] xs = np.linspace(0.01, 0.99, 500) epochs = [0, 20, 100, n_steps] colors = plt.cm.viridis(np.linspace(0.2, 0.85, len(epochs))) -cum_k = int(np.sum(sim['data'])) # total successes in full sample for epoch, col in zip(epochs, colors): - k_e = int(np.sum(sim['data'][:epoch])) + k_e = int(np.sum(sim['data'][:epoch])) pdf1 = beta_dist.pdf(xs, a1 + k_e, b1 + epoch - k_e) pdf2 = beta_dist.pdf(xs, a2 + k_e, b2 + epoch - k_e) - ax.plot(xs, pdf1, color=col, lw=1.6, ls='-') - ax.plot(xs, pdf2, color=col, lw=1.6, ls='--', - label=f'$n={epoch}$' if (epoch == 0 or epoch == n_steps) else None) + ax.plot(xs, pdf1, color=col, lw=2, ls='-') + ax.plot(xs, pdf2, color=col, lw=2, ls='--') ax.axvline(p_true, color='black', lw=1.0, ls=':', label=f'$p^*={p_true}$') ax.set_xlabel('$p$') -ax.set_ylabel('Posterior density') -ax.set_title('(d) Posterior Beta densities over time\n' - '(solid = Agent 1, dashed = Agent 2)') -# Custom legend +ax.set_ylabel('posterior density') +ax.text(0.03, 0.93, '(d)', transform=ax.transAxes) + from matplotlib.lines import Line2D -handles = [Line2D([0],[0], color=colors[0], lw=1.6, label='$n=0$ (prior)'), - Line2D([0],[0], color=colors[-1], lw=1.6, 
label=f'$n={n_steps}$'), - Line2D([0],[0], color='black', lw=1.0, ls=':', label=f'$p^*={p_true}$')] +handles = [ + Line2D([0], [0], color='black', lw=2, label='agent 1'), + Line2D([0], [0], color='black', lw=2, ls='--', label='agent 2'), + Line2D([0], [0], color=colors[0], lw=2, label='$n=0$'), + Line2D([0], [0], color=colors[-1], lw=2, label=f'$n={n_steps}$'), + Line2D([0], [0], color='black', lw=1.0, ls=':', label=f'$p^*={p_true}$') +] ax.legend(handles=handles, fontsize=8) ax.set_ylim(bottom=0) @@ -595,26 +582,33 @@ plt.show() The four panels tell a coherent story: -- **Panel (a)**: Starting from $\hat{p}_1^0 \approx 0.11$ and +- Panel (a): Starting from $\hat{p}_1^0 \approx 0.11$ and $\hat{p}_2^0 \approx 0.89$, both agents' predictive probabilities converge to $p^* = 0.65$. -- **Panel (b)**: The total-variation distance $d_n$ decays to zero on a +- Panel (b): The total-variation distance $d_n$ decays to zero on a logarithmic scale, consistent with the theorem. -- **Panel (c)**: The log likelihood ratio $\log Z_n$ converges to a finite - value — confirming that the two measures are mutually absolutely continuous - and that neither singular case applies. -- **Panel (d)**: The posterior Beta densities for the two agents start far +- Panel (c): The log likelihood ratio $\log Z_n$ converges to a finite + value, which is consistent with mutual absolute continuity in this example. +- Panel (d): The posterior Beta densities for the two agents start far apart (one near 0, one near 1) and progressively concentrate to the same distribution centred on the truth. -## Almost-Sure Convergence Across Many Paths +## Almost-sure convergence across many paths + +To illustrate the almost-sure character of the theorem, we run many independent replications. -To see the "almost-sure" character of the theorem, we run many independent -replications. On *every* path the TV distance should converge to zero, -not just on average. 
+The theorem concerns almost every path under the reference measure, not just averages across paths. ```{code-cell} ipython3 +--- +mystnb: + figure: + caption: | + Almost-sure merging across many sample paths. + The left panel plots the total-variation distance and the right panel plots the likelihood-ratio martingale. + name: fig-merging-of-opinions-many-paths +--- # ------------------------------------------------------------------------- # Simulate N_paths independent realisations # ------------------------------------------------------------------------- @@ -622,7 +616,6 @@ N_paths = 80 n_steps = 500 fig, axes = plt.subplots(1, 2, figsize=(11, 4)) -fig.suptitle("Almost-sure merging across sample paths", fontsize=12) ax_tv = axes[0] ax_log = axes[1] @@ -639,24 +632,24 @@ for i in range(N_paths): # --- Panel (a): TV distance paths --- for i in range(N_paths): ax_tv.semilogy(steps, tv_all[i] + 1e-10, color='steelblue', - lw=0.5, alpha=0.3) + lw=0.8, alpha=0.3) ax_tv.semilogy(steps, tv_all.mean(axis=0) + 1e-10, - color='black', lw=2, label='Cross-path mean') -ax_tv.set_xlabel('Observations $n$') -ax_tv.set_ylabel(r'$d_n$ (log scale)') -ax_tv.set_title(r'(a) TV distance $d_n \to 0$ on every path') + color='black', lw=2, label='mean across paths') +ax_tv.set_xlabel('observations $n$') +ax_tv.set_ylabel(r'$d_n$ (log scale)') +ax_tv.text(0.03, 0.93, '(a)', transform=ax_tv.transAxes) ax_tv.legend() # --- Panel (b): log Z_n paths --- for i in range(N_paths): ax_log.plot(steps, logZ_all[i], color='firebrick', - lw=0.5, alpha=0.3) + lw=0.8, alpha=0.3) ax_log.plot(steps, logZ_all.mean(axis=0), - color='black', lw=2, label='Cross-path mean') + color='black', lw=2, label='mean across paths') ax_log.axhline(0, color='gray', lw=0.8, ls=':') -ax_log.set_xlabel('Observations $n$') +ax_log.set_xlabel('observations $n$') ax_log.set_ylabel(r'$\log Z_n$') -ax_log.set_title(r'(b) Likelihood ratio $\log Z_n$ converges on every path') +ax_log.text(0.03, 0.93, '(b)', 
transform=ax_log.transAxes) ax_log.legend() plt.tight_layout() @@ -667,21 +660,28 @@ frac_small = np.mean(tv_all[:, -1] < 0.01) print(f"Fraction of paths with d_n < 0.01 at n = {n_steps}: {frac_small:.2f}") ``` -As predicted, $d_n \to 0$ on essentially every sample path: -the theorem gives an almost-sure guarantee, not merely a statement about -expected values. +In this simulation, the distances are small on almost all sampled paths by the final horizon. + +That is consistent with the theorem's almost-sure conclusion. -## The Supermartingale Property of $d_n$ +## The supermartingale property of $d_n$ The proof relies on $\{d_n\}$ being a non-negative supermartingale. -We can verify this numerically by checking that $d_n$ tends to decrease -over time and that its conditional expectation does not increase. +We can illustrate this numerically by looking at average increments across many paths. ```{code-cell} ipython3 +--- +mystnb: + figure: + caption: | + An illustration of the supermartingale property. + The plots show average increments of $d_n$ and their cumulative sum across many simulated paths. + name: fig-merging-of-opinions-supermartingale +--- # ------------------------------------------------------------------------- -# Verify the supermartingale property: +# Illustrate the supermartingale property: # E_Q[d_{n+1} | F_n] <= d_n # ------------------------------------------------------------------------- # Proxy: average d_{n+1} - d_n across many paths should be <= 0. 
@@ -693,25 +693,25 @@ cum_sum = np.cumsum(mean_diffs) # cumulative average change fig, axes = plt.subplots(1, 2, figsize=(10, 4)) ax = axes[0] -ax.plot(mean_diffs[:200], color='purple', lw=1.2) +ax.plot(mean_diffs[:200], color='purple', lw=2) ax.axhline(0, color='black', lw=0.8, ls='--') ax.fill_between(range(200), mean_diffs[:200], 0, - where=(mean_diffs[:200] < 0), - alpha=0.25, color='purple', label='Decrements (negative)') + where=(mean_diffs[:200] < 0), alpha=0.25, + color='purple', label='negative increments') ax.fill_between(range(200), mean_diffs[:200], 0, - where=(mean_diffs[:200] > 0), - alpha=0.25, color='red', label='Increments (positive)') -ax.set_xlabel('Observations $n$') + where=(mean_diffs[:200] > 0), alpha=0.25, + color='red', label='positive increments') +ax.set_xlabel('observations $n$') ax.set_ylabel(r'$\mathbb{E}[d_{n+1} - d_n]$') -ax.set_title(r'(a) Average increments of $d_n$') +ax.text(0.03, 0.93, '(a)', transform=ax.transAxes) ax.legend(fontsize=8) ax = axes[1] -ax.plot(cum_sum[:200], color='darkorange', lw=1.5) +ax.plot(cum_sum[:200], color='darkorange', lw=2) ax.axhline(0, color='black', lw=0.8, ls='--') -ax.set_xlabel('Observations $n$') -ax.set_ylabel(r'Cumulative average change in $d_n$') -ax.set_title(r'(b) Cumulative drift: net decrease confirms supermartingale') +ax.set_xlabel('observations $n$') +ax.set_ylabel(r'cumulative average change in $d_n$') +ax.text(0.03, 0.93, '(b)', transform=ax.transAxes) plt.tight_layout() plt.show() @@ -720,50 +720,55 @@ frac_decrease = np.mean(mean_diffs < 0) print(f"Fraction of steps with average decrement: {frac_decrease:.2%}") ``` -The average increment is negative at most steps, and the cumulative drift -is monotonically downward. This is the numerical signature of the -supermartingale property $\mathbb{E}_Q[d_{n+1}\,|\,\mathscr{F}_n] \leq d_n$. +The average increment is negative at most steps, and the cumulative drift is downward. 
+This is only an illustration, not a proof, because it uses unconditional averages rather than the full conditional expectation in the theorem. -## Failure of Merging: Mutual Singularity -What happens when the hypothesis of the theorem fails — that is, when -$P \not\ll Q$? +## Failure of merging: mutual singularity -The answer is both instructive and stark. +What happens when the hypothesis $P \ll Q$ fails? + +The singular case is the cleanest counterexample. ### Point-mass priors Suppose both agents hold degenerate (point-mass) priors: -- **Agent P**: certain that $p = p_P = 0.30$. -- **Agent Q**: certain that $p = p_Q = 0.75$. +- Agent P: certain that $p = p_P = 0.30$. +- Agent Q: certain that $p = p_Q = 0.75$. + +Since $P$ charges only sequences whose empirical frequency converges to $0.30$, and $Q$ charges only sequences whose empirical frequency converges to $0.75$, the two measures are mutually singular: $P \perp Q$. -Since $P$ charges only sequences whose empirical frequency converges to -$0.30$, and $Q$ charges only sequences whose empirical frequency converges -to $0.75$, the two measures are mutually **singular**: $P \perp Q$. +The conditional distributions do not update, because both agents are already certain of their model. -The conditional distributions do not update — both agents are certain of -their model — so +For the theorem's object, namely the conditional law of the entire future path, $$ \|P(\,\cdot\,|\,\mathscr{F}_n) - Q(\,\cdot\,|\,\mathscr{F}_n)\|_{\mathrm{TV}} -= \|P - Q\|_{\mathrm{TV}} = |p_P - p_Q| = 0.45 += \|P - Q\|_{\mathrm{TV}} = 1 \quad \text{for all } n. $$ -The empirical frequency converges to the truth under the true measure, -which means one agent's model is eventually falsified (and indeed, under -$Q$, the empirical frequency $\to 0.75$ reveals that agent $P$'s model -is wrong with probability 1). +This equality holds because the infinite-product Bernoulli measures with distinct success probabilities are singular. 
+ +If we look only one step ahead, the predictive distance is $|p_P - p_Q| = 0.45$. + +That is smaller than one, but it is not the quantity that appears in Blackwell–Dubins. ```{code-cell} ipython3 +--- +mystnb: + figure: + caption: | + Failure of merging under singular priors. + The right panel separates the full future-path distance, which stays at one, from the one-step predictive gap, which stays at $|p_P - p_Q|$. + name: fig-merging-of-opinions-singular-priors +--- # ------------------------------------------------------------------------- # Failure of merging: mutually singular point-mass priors # ------------------------------------------------------------------------- fig, axes = plt.subplots(1, 2, figsize=(11, 4)) -fig.suptitle("Failure of Merging: Mutually Singular Priors ($P \\perp Q$)", - fontsize=12) # True data drawn under Q's model (p_Q = 0.75) p_P = 0.30 # agent P's fixed belief @@ -776,8 +781,9 @@ data = rng.binomial(1, p_Q, n_steps) # Empirical frequency of successes emp_freq = np.cumsum(data) / np.arange(1, n_steps + 1) -# 1-step predictive TV distance (constant: |p_P - p_Q|) -tv_singular = np.full(n_steps, np.abs(p_P - p_Q)) +# Full future-path TV distance and one-step predictive TV distance +tv_singular_full = np.ones(n_steps + 1) +tv_singular_1step = np.full(n_steps + 1, np.abs(p_P - p_Q)) # For comparison: run a Beta-Bernoulli merging experiment with the same truth sim_abs_cont = run_simulation(p_Q, 1.0, 8.0, 8.0, 1.0, n_steps, seed=1) @@ -785,47 +791,48 @@ sim_abs_cont = run_simulation(p_Q, 1.0, 8.0, 8.0, 1.0, n_steps, seed=1) # --- Panel (a): empirical frequency --- ax = axes[0] ax.plot(np.arange(1, n_steps + 1), emp_freq, - color='steelblue', lw=1.5, label='Empirical frequency $k_n/n$') + color='steelblue', lw=2, label='empirical frequency $k_n/n$') ax.axhline(p_Q, color='firebrick', lw=1.2, ls='--', - label=f'Truth $p_Q = {p_Q}$') -ax.axhline(p_P, color='gray', lw=1.2, ls=':', + label=f'truth $p_Q = {p_Q}$') +ax.axhline(p_P, color='gray', 
lw=1.2, ls=':', label=f"Agent P's belief $p_P = {p_P}$") -ax.set_xlabel('Observations $n$') -ax.set_ylabel('Probability') -ax.set_title('(a) Empirical frequency converges to truth') +ax.set_xlabel('observations $n$') +ax.set_ylabel('probability') +ax.text(0.03, 0.93, '(a)', transform=ax.transAxes) ax.legend(fontsize=8) ax.set_ylim(0, 1) # --- Panel (b): TV distance comparison --- ax = axes[1] -ax.plot(np.arange(1, n_steps + 1), tv_singular, - color='firebrick', lw=2.0, - label=r'Singular priors: $d_n = |p_P - p_Q| = 0.45$') -ax.semilogy(np.arange(n_steps + 1), - sim_abs_cont['tv_beta'] + 1e-10, - color='steelblue', lw=2.0, - label=r'$\mathrm{Beta}(1,8)$ vs $\mathrm{Beta}(8,1)$: $d_n \to 0$') -ax.set_xlabel('Observations $n$') +ax.plot(np.arange(n_steps + 1), tv_singular_full, + color='firebrick', lw=2, + label=r'singular priors: full-path $d_n = 1$') +ax.plot(np.arange(n_steps + 1), tv_singular_1step, + color='gray', lw=2, ls=':', + label=r'one-step predictive gap $= |p_P - p_Q|$') +ax.plot(np.arange(n_steps + 1), sim_abs_cont['tv_beta'], + color='steelblue', lw=2, + label=r'$\mathrm{Beta}(1,8)$ vs $\mathrm{Beta}(8,1)$') +ax.set_xlabel('observations $n$') ax.set_ylabel(r'$d_n$') -ax.set_title('(b) TV distance: merging vs non-merging') +ax.text(0.03, 0.93, '(b)', transform=ax.transAxes) ax.legend(fontsize=8) +ax.set_ylim(0, 1.05) plt.tight_layout() plt.show() ``` -The contrast is vivid. With mutually absolutely continuous priors (blue), -the total-variation distance decays to zero as Blackwell–Dubins guarantees. -With mutually singular point-mass priors (red), the distance stays -permanently at $|p_P - p_Q| = 0.45$. +The contrast is sharp. + +With mutually absolutely continuous priors, $d_n$ decays to zero. -More data never resolves the -disagreement — the two agents are committed to models that are -separated by events they each regard as having probability zero under -the other's measure. 
+With singular point-mass priors, the full future-path distance stays at one forever. +More data does not reconcile the agents, because each rules out paths the other assigns positive probability. -## Kakutani's Theorem: When Does Merging Hold? + +## Kakutani's theorem: when does merging hold? A natural question is: for which product measures does the Blackwell–Dubins hypothesis $P \ll Q$ hold? For infinite product measures, the answer is @@ -833,11 +840,11 @@ given by a classical result of {cite:t}`kakutani1948`. ### Hellinger affinities -```{admonition} Definition -:class: tip -**Hellinger affinity.** +```{prf:definition} Hellinger Affinity +:label: hellinger_affinity + For probability measures $P_n$ and $Q_n$ on $(S, \mathscr{S})$ with common -dominating measure $\lambda$, the *Hellinger affinity* is +dominating measure $\lambda$, the **Hellinger affinity** is $$ \rho_n = \int_S \sqrt{\frac{dP_n}{d\lambda} \cdot \frac{dQ_n}{d\lambda}}\,d\lambda @@ -849,13 +856,13 @@ $\rho_n = 1$ if and only if $P_n = Q_n$. For two specific one-dimensional families: -- **Gaussian**: $P_n = \mathcal{N}(\mu_n, 1)$ vs $Q_n = \mathcal{N}(0,1)$: +- Gaussian: $P_n = \mathcal{N}(\mu_n, 1)$ vs $Q_n = \mathcal{N}(0,1)$: $$ \rho_n^{\text{Gauss}} = \exp\!\left(-\frac{\mu_n^2}{8}\right). $$ -- **Bernoulli**: $P_n = \mathrm{Bernoulli}(p)$ vs $Q_n = \mathrm{Bernoulli}(q)$: +- Bernoulli: $P_n = \mathrm{Bernoulli}(p)$ vs $Q_n = \mathrm{Bernoulli}(q)$: $$ \rho_n^{\text{Bern}} = \sqrt{pq} + \sqrt{(1-p)(1-q)}. @@ -863,8 +870,9 @@ $$ ### Kakutani's dichotomy -```{admonition} Theorem (Kakutani, 1948) -:class: important +```{prf:theorem} Kakutani (1948) +:label: kakutani_dichotomy + Let $P = \bigotimes_{n=1}^\infty P_n$ and $Q = \bigotimes_{n=1}^\infty Q_n$ be infinite product measures. Then either $P \sim Q$ or $P \perp Q$; there is no intermediate case. Specifically, @@ -879,13 +887,12 @@ $$ If $\prod_{n=1}^\infty \rho_n = 0$, then $P \perp Q$. 
-*Proof sketch.* -The likelihood ratio $Z_N = \prod_{n=1}^N (dP_n/dQ_n)$ is a $Q$-martingale. -Its $L^{1/2}(Q)$ norm equals $\prod_{n=1}^N \rho_n$. -If $\prod \rho_n > 0$: the martingale is bounded in $L^{1/2}$, hence -uniformly integrable in $L^1$, giving $P \ll Q$. -If $\prod \rho_n = 0$: $Z_N \to 0$ in $L^{1/2}$, so $Z_\infty = 0$ -$Q$-a.s. and $P \perp Q$. $\square$ +*Proof idea.* +A standard proof studies the likelihood-ratio martingale +$Z_N = \prod_{n=1}^N (dP_n/dQ_n)$ together with the identity +$\mathbb{E}_Q[\sqrt{Z_N}] = \prod_{n=1}^N \rho_n$. + +The product staying positive corresponds to equivalence, while the product collapsing to zero corresponds to singularity. $\square$ ``` ### Implication for merging @@ -895,31 +902,37 @@ For i.i.d.-type sequences, Kakutani's theorem gives the following picture: | Scenario | $\sum_n (1-\rho_n)$ | Conclusion | Merging? | |---|---|---|---| | $P_n = Q_n$ for all $n$ | $0$ | $P = Q$ | Trivially yes | -| $P_n \ne Q_n$ with $\sum_n (1-\rho_n) < \infty$ | Finite | $P \sim Q$ | Yes — Blackwell–Dubins applies | +| $P_n \ne Q_n$ with $\sum_n (1-\rho_n) < \infty$ | Finite | $P \sim Q$ | Yes; Blackwell–Dubins applies | | $P_n = P \ne Q = Q_n$ fixed, $n \ge 1$ | $\infty$ | $P \perp Q$ | No | -The i.i.d. case with different fixed marginals is the most common "no merging" -scenario: if two agents ascribe permanently different distributions to each -observation, they will eventually be in completely disjoint worlds. +The i.i.d. case with different fixed marginals is the standard no-merging example. + +If two agents assign permanently different distributions to each observation, they end up in disjoint probability worlds. -### Python: the Gaussian product measure example +### Python: a Gaussian product-measure example -We illustrate Kakutani's dichotomy with Gaussian product measures, -$Q = \mathcal{N}(0,1)^{\otimes\mathbb{N}}$ as the reference measure and -$P = \bigotimes_n \mathcal{N}(\mu_n,1)$ as the alternative. 
+We illustrate Kakutani's dichotomy with Gaussian product measures. + +Take $Q = \mathcal{N}(0,1)^{\otimes\mathbb{N}}$ as the reference measure and $P = \bigotimes_n \mathcal{N}(\mu_n,1)$ as the alternative. Three choices of $\mu_n$: -1. $\mu_n = \mu > 0$ **constant** ($\sum (1-\rho_n) = \infty$) $\Rightarrow P \perp Q$. +1. $\mu_n = \mu > 0$ constant ($\sum (1-\rho_n) = \infty$) $\Rightarrow P \perp Q$. 2. $\mu_n = c/\!\sqrt{n}$ ($\sum (1-\rho_n) \approx \sum c^2/(8n) = \infty$) $\Rightarrow P \perp Q$. 3. $\mu_n = c/n$ ($\sum (1-\rho_n) \approx \sum c^2/(8n^2) < \infty$) $\Rightarrow P \sim Q$. ```{code-cell} ipython3 +--- +mystnb: + figure: + caption: | + Kakutani's dichotomy for Gaussian product measures. + Only the $\mu_n = c/n$ sequence produces a finite Hellinger sum and a nondegenerate limiting likelihood ratio. + name: fig-merging-of-opinions-kakutani-gaussian +--- # ------------------------------------------------------------------------- # Kakutani's theorem: Gaussian product measures # ------------------------------------------------------------------------- -from scipy.stats import norm as scipy_norm - def hellinger_affinity_gauss(mu_n): """Hellinger affinity between N(mu_n, 1) and N(0, 1).""" return np.exp(-mu_n**2 / 8.0) @@ -939,22 +952,19 @@ sequences = { } fig, axes = plt.subplots(1, 3, figsize=(13, 4)) -fig.suptitle(r"Kakutani's Dichotomy: Gaussian Product Measures ($c = 2$)", - fontsize=12) colors_k = ['firebrick', 'darkorange', 'steelblue'] -labels_k = list(sequences.keys()) # --- Panel (a): log Hellinger product log prod rho_n = sum log rho_n --- ax = axes[0] for (label, mu_seq), col in zip(sequences.items(), colors_k): rho = hellinger_affinity_gauss(mu_seq) log_prod = np.cumsum(np.log(rho)) - ax.plot(ns, log_prod, color=col, lw=1.8, label=label) + ax.plot(ns, log_prod, color=col, lw=2, label=label) ax.axhline(0, color='black', lw=0.8, ls=':') -ax.set_xlabel('$N$') +ax.set_xlabel('horizon $N$') ax.set_ylabel(r'$\log \prod_{n=1}^{N} \rho_n$') 
-ax.set_title(r'(a) Log Hellinger product') +ax.text(0.03, 0.93, '(a)', transform=ax.transAxes) ax.legend(fontsize=7.5) # --- Panel (b): sum of (1 - rho_n) --- @@ -962,10 +972,10 @@ ax = axes[1] for (label, mu_seq), col in zip(sequences.items(), colors_k): rho = hellinger_affinity_gauss(mu_seq) cum_sum = np.cumsum(1 - rho) - ax.plot(ns, cum_sum, color=col, lw=1.8, label=label) -ax.set_xlabel('$N$') + ax.plot(ns, cum_sum, color=col, lw=2, label=label) +ax.set_xlabel('horizon $N$') ax.set_ylabel(r'$\sum_{n=1}^{N}(1 - \rho_n)$') -ax.set_title(r'(b) Cumulative $\sum (1-\rho_n)$: finite $\Leftrightarrow$ $P \sim Q$') +ax.text(0.03, 0.93, '(b)', transform=ax.transAxes) ax.legend(fontsize=7.5) # --- Panel (c): simulated log Z_N trajectories --- @@ -979,12 +989,12 @@ for (label, mu_seq), col in zip(sequences.items(), colors_k): # log Z_N = sum_{n=1}^N [mu_n * x_n - mu_n^2 / 2] log_Z_increments = mu_seq[:N_plot] * x - mu_seq[:N_plot]**2 / 2.0 log_Z_path = np.concatenate([[0], np.cumsum(log_Z_increments)]) - ax.plot(np.arange(N_plot + 1), log_Z_path, color=col, lw=1.5, label=label) + ax.plot(np.arange(N_plot + 1), log_Z_path, color=col, lw=2, label=label) ax.axhline(0, color='black', lw=0.8, ls=':') -ax.set_xlabel('$N$') -ax.set_ylabel(r'$\log Z_N$ (one trajectory under $Q$)') -ax.set_title('(c) Likelihood ratio: diverges or converges') +ax.set_xlabel('horizon $N$') +ax.set_ylabel(r'$\log Z_N$ under $Q$') +ax.text(0.03, 0.93, '(c)', transform=ax.transAxes) ax.legend(fontsize=7.5) plt.tight_layout() @@ -993,21 +1003,21 @@ plt.show() The three panels confirm Kakutani's theorem: -- **Constant drift** (red): $\log \prod \rho_n \to -\infty$ and +- Constant drift (red): $\log \prod \rho_n \to -\infty$ and $\sum (1-\rho_n) \to \infty$; the likelihood ratio drifts to $-\infty$ under $Q$, so $Z_N \to 0$ $Q$-a.s. and $P \perp Q$. -- **$\mu_n = c/\sqrt{n}$** (orange): the same qualitative picture. +- $\mu_n = c/\sqrt{n}$ (orange): the same qualitative picture. 
Despite the drift vanishing, it does so too slowly. -- **$\mu_n = c/n$** (blue): $\sum (1-\rho_n) < \infty$, the log Hellinger - product stabilises to a finite limit, and the likelihood ratio converges - to a finite positive value — confirming $P \sim Q$. +- $\mu_n = c/n$ (blue): $\sum (1-\rho_n) < \infty$, the log Hellinger + product stabilises to a finite limit, and the simulated likelihood ratio + remains nondegenerate, which is consistent with $P \sim Q$. Only in the third case does Blackwell–Dubins apply and merging occur. -## Extension to Continuous Time +## Extension to continuous time -The Blackwell–Dubins theorem extends naturally to continuous time. +The same logic extends to continuous time. ### Girsanov's theorem and the likelihood-ratio process @@ -1019,8 +1029,8 @@ $$ W_t = \widetilde{W}_t + \int_0^t \theta_s\, ds, $$ -where $\widetilde{W}$ is a $P$-Brownian motion. The -**Girsanov–Cameron–Martin theorem** {cite:p}`girsanov1960` gives the +where $\widetilde{W}$ is a $P$-Brownian motion. +The Girsanov–Cameron–Martin theorem {cite:p}`girsanov1960` gives the likelihood-ratio process as the stochastic exponential $$ @@ -1031,16 +1041,17 @@ $$ $Z_t$ is always a non-negative $Q$-local martingale: it is a true martingale if and only if $\mathbb{E}_Q[Z_t] = 1$ for all $t$. -**Novikov's condition** {cite:p}`novikov1972` — $\mathbb{E}_Q\!\left[\exp\!\left(\tfrac{1}{2}\int_0^T \theta_s^2\,ds\right)\right] < \infty$ for all $T$ — is sufficient. +Novikov's condition {cite:p}`novikov1972`, +$\mathbb{E}_Q\!\left[\exp\!\left(\tfrac{1}{2}\int_0^T \theta_s^2\,ds\right)\right] < \infty$ for all $T$, +is sufficient. ### The dichotomy at infinity -A key subtlety on $[0,+\infty)$: local absolute continuity ($P_t \ll Q_t$ -for every finite $t$) does *not* imply global absolute continuity -($P \ll Q$ on $\mathscr{F}_\infty$). +A key subtlety on $[0,+\infty)$ is that local absolute continuity does *not* imply global absolute continuity on $\mathscr{F}_\infty$. 
+ +```{prf:proposition} Dichotomy at Infinity +:label: dichotomy_at_infinity -```{admonition} Proposition (Dichotomy at infinity) -:class: note Suppose $Z_t$ is a true $Q$-martingale for every finite horizon. Then $Z_t \to Z_\infty$ $Q$-a.s., and exactly one of the following holds: @@ -1051,18 +1062,15 @@ $Z_t \to Z_\infty$ $Q$-a.s., and exactly one of the following holds: $Z_\infty = 0$ $Q$-a.s. and $P \perp Q$ on $\mathscr{F}_\infty$. ``` -A sufficient condition for case 1 is the **energy condition** +A convenient sufficient condition in deterministic-drift examples is the **energy condition** $$ \int_0^\infty \theta_s^2\,ds < \infty \quad Q\text{-a.s.} $$ -Informally, this says the total amount of information that separates the -two measures over the infinite horizon is finite. +Informally, this says the total amount of information separating the two measures over the infinite horizon is finite. -When $\theta$ is a -non-zero constant — meaning the two agents predict permanently different -drifts — the energy condition fails, $P \perp Q$, and merging cannot occur. +When $\theta$ is a non-zero constant, the condition fails, the measures are singular on $\mathscr{F}_\infty$, and merging does not occur. With $P \ll Q$ on $\mathscr{F}_\infty$ established, the proof of the continuous-time Blackwell–Dubins theorem is identical to the discrete-time @@ -1076,51 +1084,40 @@ forces $d_\infty = 0$. ### Bayesian learning -The most direct application is to Bayesian inference. +The most direct application is Bayesian inference. -Suppose data -$(x_1, x_2, \ldots)$ are drawn from the true measure $Q^*$. An agent -holds a prior $\pi$ over a family $\{Q_\theta : \theta \in \Theta\}$, -inducing a marginal $P = \int Q_\theta\,\pi(d\theta)$. +Suppose data $(x_1, x_2, \ldots)$ are drawn from the true measure $Q^*$. 
-If $P \ll Q^*$ (equivalently: the prior assigns positive probability to every -neighbourhood of the true model), then by Blackwell–Dubins, +An agent holds a prior $\pi$ over a family $\{Q_\theta : \theta \in \Theta\}$, inducing a marginal $P = \int Q_\theta\,\pi(d\theta)$. + +If $P \ll Q^*$, then Blackwell–Dubins gives $$ \bigl\|P(\,\cdot\,|\,x_1,\ldots,x_n) - Q^*(\,\cdot\,|\,x_1,\ldots,x_n)\bigr\|_{\mathrm{TV}} \to 0 \quad Q^*\text{-a.s.} $$ -This is a strong form of **Bayesian consistency**: the agent's predictions -become indistinguishable from the truth, regardless of the specific prior, -as long as the prior is in the right absolute-continuity class. +This is a strong form of Bayesian consistency. + +In many dominated parametric models, absolute continuity follows from the prior assigning positive mass to a suitable neighbourhood of the true parameter. -{cite:t}`diaconis1986` establish that absolute continuity of the prior -with respect to the truth is not just sufficient but essentially *necessary* -for Doob consistency. +{cite:t}`diaconis1986` show that this absolute-continuity condition is not just sufficient but essentially *necessary* for Doob consistency. -When $P \perp Q^*$, there exist events of probability -one under $Q^*$ that have probability zero under $P$, so the agent maintains -fundamentally wrong beliefs forever. +When $P \perp Q^*$, there are events of probability one under $Q^*$ that have probability zero under $P$, so the agent's beliefs remain fundamentally misspecified. ### Rational expectations and heterogeneous priors -In macroeconomics, the **common prior assumption** embedded in rational -expectations models requires all agents to agree on the probability model -for the economy. +In macroeconomics, rational-expectations models typically impose a common prior. 
-Blackwell–Dubins provides a dynamic justification: if -two agents start with heterogeneous but mutually absolutely continuous -priors and observe a common history, their conditional forecasts will -eventually agree on every event, even if they never explicitly coordinate -their beliefs. +Blackwell–Dubins gives a dynamic justification for weaker initial agreement. -{cite:t}`aumann1976`'s **agreement theorem** strengthens this: agents with +If two agents start with equivalent priors and observe the same history, their conditional forecasts eventually agree on every event. + +{cite:t}`aumann1976`'s agreement theorem strengthens this: agents with a common prior cannot "agree to disagree" on posterior probabilities. -Blackwell–Dubins complements Aumann by showing that even without a common -prior, merging occurs eventually if the initial priors are equivalent. +Blackwell–Dubins complements Aumann by showing that equivalent priors are enough for eventual agreement. ### Ergodic Markov chains @@ -1137,20 +1134,16 @@ $$ \to 0. $$ -This is a special form of merging that does *not* require absolute -continuity, because ergodicity already forces both distributions to the same -limit. +This is a special form of merging that does *not* require absolute continuity, because ergodicity already forces both distributions to the same limit. + +Blackwell–Dubins is the right analogue for non-ergodic or non-Markovian environments, where no single invariant distribution need exist. -Blackwell–Dubins is the appropriate generalisation for -**non-ergodic** or **non-Markovian** processes, where no single invariant -measure exists and the operative condition is absolute continuity of the -initial priors. +## The rate of merging -## The Rate of Merging +Blackwell–Dubins is qualitative. -Blackwell–Dubins gives only almost-sure convergence; it says nothing about -*how fast* $d_n \to 0$. +It tells us that $d_n \to 0$, but not how fast. 
The bound @@ -1158,16 +1151,21 @@ $$ \mathbb{E}_Q[d_n] \leq \tfrac{1}{2}\,\mathbb{E}_Q[|Z_n - Z_\infty|] $$ -shows that the rate of merging is controlled by the $L^1$ convergence -rate of the likelihood ratio martingale. +shows that the rate of merging is controlled by the $L^1$ convergence rate of the likelihood-ratio martingale. -For parametric Bayesian models, the posterior contracts at the -$n^{-1/2}$ rate (Bernstein–von Mises theorem), which implies -$d_n = O(n^{-1/2})$ in expectation. +In regular parametric examples, one often sees $n^{-1/2}$-type behavior. -The following figure illustrates this for our Beta–Bernoulli model. +The next figure checks that heuristic in the Beta–Bernoulli model. ```{code-cell} ipython3 +--- +mystnb: + figure: + caption: | + A log-log plot of the average merging distance in the Beta–Bernoulli model. + The fitted slope is close to $-1/2$, which is consistent with square-root decay in this experiment. + name: fig-merging-of-opinions-rate +--- # ------------------------------------------------------------------------- # Rate of merging: compare d_n to n^{-1/2} # ------------------------------------------------------------------------- @@ -1195,11 +1193,10 @@ ref_curve = C_ref / np.sqrt(ns_rate) fig, ax = plt.subplots(figsize=(8, 4)) ax.loglog(ns_rate, mean_tv, color='steelblue', lw=2, label=r'$\mathbb{E}_Q[d_n]$ (Monte Carlo)') -ax.loglog(ns_rate, ref_curve, color='firebrick', lw=1.5, ls='--', +ax.loglog(ns_rate, ref_curve, color='firebrick', lw=2, ls='--', label=rf'Reference $C/\sqrt{{n}}$ (fitted slope $\approx {slope:.2f}$)') -ax.set_xlabel('$n$') +ax.set_xlabel('sample size $n$') ax.set_ylabel(r'$\mathbb{E}_Q[d_n]$') -ax.set_title(r'Rate of merging: $\mathbb{E}_Q[d_n] = O(n^{-1/2})$') ax.legend() plt.tight_layout() plt.show() @@ -1207,148 +1204,67 @@ plt.show() print(f"Fitted log-log slope: {slope:.3f} (predicted: -0.50)") ``` -The log–log slope of approximately $-0.5$ confirms the $O(n^{-1/2})$ rate -for this parametric model on 
any single sample path. +The fitted slope is close to $-0.5$. + +That is consistent with $n^{-1/2}$ scaling in this simulation. ## Summary -The logical flow underlying the Blackwell–Dubins theorem is: +The logical flow underlying the Blackwell–Dubins theorem is: $$ P \ll Q \;\Longrightarrow\; -Z_n = \mathbb{E}_Q[Z_\infty\,|\,\mathscr{F}_n] -\text{ uniformly integrable} +Z = \frac{dP}{dQ} \in L^1(Q) \;\Longrightarrow\; -Z_n \xrightarrow{L^1(Q)} Z_\infty +Z_n = \mathbb{E}_Q[Z \,|\, \mathscr{F}_n] +\xrightarrow{L^1(Q)} +Z_\infty \;\Longrightarrow\; d_n \xrightarrow{Q\text{-a.s.}} 0. $$ Key takeaways: -1. **Mutual absolute continuity** is the operative condition. - It is a statement about which events are deemed *possible*, not about - how likely they are. Two agents can disagree wildly on probabilities - yet still be guaranteed to eventually agree — provided neither agent - rules out events the other considers possible. +1. One-sided absolute continuity is enough for the theorem. + If $P \ll Q$, merging holds $Q$-almost surely. + If $P \sim Q$, then the conclusion is symmetric. -2. **The likelihood-ratio martingale** is the central object. +2. The likelihood-ratio martingale is the central object. Its $L^1$ convergence (guaranteed by uniform integrability under $P \ll Q$) drives the almost-sure convergence of the total-variation distance $d_n$. -3. **The supermartingale structure of $d_n$** provides the almost-sure +3. The supermartingale structure of $d_n$ provides the almost-sure convergence: more data can only reduce (in expectation) the difficulty of telling two hypotheses apart. -4. **Kakutani's theorem** tells us when $P \ll Q$ holds for product +4. Kakutani's theorem tells us when $P \ll Q$ holds for product measures: precisely when the Hellinger affinities satisfy $\sum_n (1 - \rho_n) < \infty$. -5. **There is a sharp dichotomy**: either $P \sim Q$ (merging) or - $P \perp Q$ (permanent disagreement). There is no middle ground. 
- -## Applications in Economics - -{cite}`KalaiLehrer1993Nash` apply the Blackwell--Dubins theorem to show that -rational Bayesian learning in an infinitely repeated game leads play to -resemble a Nash equilibrium. - -If each player's prior is absolutely continuous -with respect to the true distribution of opponents' strategies, beliefs merge -with the truth and best responses converge to approximate Nash behavior. - -This paper is a primary conduit through which Blackwell--Dubins entered mainstream -economic theory. - -{cite}`KalaiLehrer1993Subjective` introduce the notion of a subjective -equilibrium in repeated games and use the merging result to establish that -subjective and objective equilibria coincide in the long run under the same -absolute continuity condition. +5. For the product-measure settings covered by Kakutani, there is a sharp dichotomy: + either $P \sim Q$ and merging occurs, or $P \perp Q$ and disagreement can persist forever. -{cite}`KalaiLehrer1994Merging` extend the Blackwell--Dubins framework by -introducing weaker notions of merging---weak and strong---suited to studying -convergence to equilibrium in infinite games and dynamic economies where -full total-variation merging is too demanding. +## Applications in economics -{cite}`KalaiLehrerSmorodinsky1999` connect merging of opinions to the theory -of calibrated forecasting, showing that a forecaster whose priors satisfy an -absolute-continuity condition relative to the true process will be calibrated -in the long run. +Some influential applications and extensions are: +- {cite}`KalaiLehrer1993Nash`: repeated-game learning drives play toward Nash behavior when priors are absolutely continuous with respect to the truth. +- {cite}`KalaiLehrer1993Subjective`: subjective and objective equilibria coincide asymptotically under the same condition. 
+- {cite}`KalaiLehrer1994Merging`: weak and strong notions of merging are introduced for environments where full total-variation convergence is too strong.
+- {cite}`KalaiLehrerSmorodinsky1999`: merging is linked to calibrated forecasting.
+- {cite}`JacksonKalaiSmorodinsky1999`: de Finetti-style representations are connected to Bayesian learning and posterior convergence.
+- {cite}`JacksonKalai1999`: social learning erodes reputational effects that rely on persistent disagreement across cohorts.
+- {cite}`Sandroni1998Nash`: near-absolute-continuity conditions are shown to suffice for Nash-type convergence in repeated games.
+- {cite}`Sandroni2000`: gives an alternative proof and an economic interpretation of persistent disagreement in terms of mutually favorable bets.
+- {cite}`LehrerSmorodinsky1996Compatible`: studies broader compatibility notions beyond Blackwell--Dubins absolute continuity.
+- {cite}`LehrerSmorodinsky1996Learning`: surveys merging and learning in repeated strategic environments.
+- {cite}`Nyarko1994`: relates Bayesian learning under absolute continuity to convergence toward correlated equilibrium.
+- {cite}`PomattoAlNajjarSandroni2014`: extends the theorem to finitely additive probabilities and connects merging to test manipulability.
+- {cite}`AcemogluChernozhukovYildiz2016`: shows how disagreement can persist when agents are uncertain about the signal structure itself.

-{cite}`JacksonKalaiSmorodinsky1999` revisit de~Finetti's representation theorem
-through the lens of Bayesian learning.
-
-They show that when agents learn about
-a stochastic process by observing its realizations, the beliefs that emerge
-have a de~Finetti-style exchangeability structure, with the Blackwell--Dubins
-theorem playing a central role in establishing convergence of posterior
-representations.
-
-{cite}`JacksonKalai1999` study a model of recurring games in which successive
-cohorts of players observe the history of earlier play. 
Using the -rational-learning machinery of Kalai--Lehrer together with Blackwell--Dubins, -they show that reputational effects that can sustain non-Nash behavior in an -isolated group dissipate over time as social learning spreads through the -population. - - -{cite}`Sandroni1998Nash` shows that the absolute continuity condition required -by Blackwell--Dubins and by Kalai--Lehrer can be weakened to *almost* absolute -continuity---a condition under which Nash convergence of learning still holds, -broadening the scope of the rational-learning program. - -{cite}`Sandroni2000` provides an alternative proof of the Blackwell--Dubins -theorem that makes the role of absolute continuity transparent. - -The paper -argues that *persistent disagreement*---the negation of merging---implies the -existence of mutually favorable bets on which each agent is certain to profit -on average, a violation of absolute continuity. - -The analysis raises questions -about the economic relevance of the merging result by clarifying just how -strong the absolute continuity hypothesis is. - - -{cite}`LehrerSmorodinsky1996Compatible` characterize the class of -*compatible* pairs of measures---those for which some form of merging -obtains---going beyond the sufficient condition of absolute continuity -used by Blackwell and Dubins. - -{cite}`LehrerSmorodinsky1996Learning` survey the relationship between merging -of opinions and learning in repeated strategic environments, collected in a -volume honoring David Blackwell. - - -{cite}`Nyarko1994` uses Blackwell--Dubins to prove that Bayesian learning in -an infinitely repeated normal-form game leads beliefs and empirical -distributions to converge to a correlated equilibrium of the true game, -under the absolute continuity of priors. - -{cite}`PomattoAlNajjarSandroni2014` extend the Blackwell--Dubins theorem to -the class of finitely additive (Savagean) probability measures. 
- -They show -that the theorem holds for extreme points of the set of measures compatible -with a given prior, and they exploit this characterization to study when -statistical tests of forecasting ability can be manipulated---connecting -the merging and testing literatures. - -{cite}`AcemogluChernozhukovYildiz2016` identify a fragility in the -Blackwell--Dubins consensus result. - -When agents are uncertain not just about -an underlying parameter but also about the mapping from the parameter to signal -distributions, absolute continuity of priors is no longer sufficient for -asymptotic agreement. - -Even with identical support, agents may disagree -forever, providing a Bayesian foundation for persistent heterogeneous beliefs. - -## A Key Companion Paper from Probability +## A key companion paper from probability {cite}`DiaconisFreedman1986` establish consistency of Bayes estimates under misspecification, a result in the same intellectual tradition as @@ -1357,4 +1273,3 @@ Blackwell--Dubins. It is routinely co-cited with the merging theorem in the economics learning literature as providing the probabilistic underpinning for Bayesian consistency. 
- From ffea1a8fca4a9eb8256b3d64f64c40a88f992a18 Mon Sep 17 00:00:00 2001 From: thomassargent30 Date: Wed, 1 Apr 2026 19:23:30 -0400 Subject: [PATCH 07/20] Tom's additional lecture on survival --- lectures/_static/quant-econ.bib | 218 ++++ lectures/_toc.yml | 1 + lectures/survival_recursive_preferences.md | 1153 ++++++++++++++++++++ 3 files changed, 1372 insertions(+) create mode 100644 lectures/survival_recursive_preferences.md diff --git a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index 6c66b3b05..fbae27a9d 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -1,3 +1,221 @@ +@article{Borovicka2020, + author = {Borovička, Jaroslav}, + title = {Survival and Long-Run Dynamics with Heterogeneous Beliefs under Recursive Preferences}, + journal = {Journal of Political Economy}, + volume = {128}, + number = {1}, + pages = {206--251}, + year = {2020}, + publisher = {University of Chicago Press} +} + +@article{Sandroni2000, + author = {Sandroni, Alvaro}, + title = {Do Markets Favor Agents Able to Make Accurate Predictions?}, + journal = {Econometrica}, + volume = {68}, + number = {6}, + pages = {1303--1341}, + year = {2000} +} + +@article{Blume_Easley2006, + author = {Blume, Lawrence and Easley, David}, + title = {If You're So Smart, Why Aren't You Rich? {B}elief Selection in Complete and Incomplete Markets}, + journal = {Econometrica}, + volume = {74}, + number = {4}, + pages = {929--966}, + year = {2006} +} + +@article{Epstein_Zin1989, + author = {Epstein, Larry G. and Zin, Stanley E.}, + title = {Substitution, Risk Aversion, and the Temporal Behavior of Consumption and Asset Returns: A Theoretical Framework}, + journal = {Econometrica}, + volume = {57}, + number = {4}, + pages = {937--969}, + year = {1989} +} + +@article{Epstein_Zin1991, + author = {Epstein, Larry G. 
and Zin, Stanley E.}, + title = {Substitution, Risk Aversion, and the Temporal Behavior of Consumption and Asset Returns: An Empirical Analysis}, + journal = {Journal of Political Economy}, + volume = {99}, + number = {2}, + pages = {263--286}, + year = {1991} +} + +@article{Duffie_Epstein1992a, + author = {Duffie, Darrell and Epstein, Larry G.}, + title = {Stochastic Differential Utility}, + journal = {Econometrica}, + volume = {60}, + number = {2}, + pages = {353--394}, + year = {1992} +} + +@article{Dumas_Uppal_Wang2000, + author = {Dumas, Bernard and Uppal, Raman and Wang, Tan}, + title = {Efficient Intertemporal Allocations with Recursive Utility}, + journal = {Journal of Economic Theory}, + volume = {93}, + number = {2}, + pages = {154--183}, + year = {2000} +} + +@article{DeLong_etal1991, + author = {De Long, J. Bradford and Shleifer, Andrei and Summers, Lawrence H. and Waldmann, Robert J.}, + title = {The Survival of Noise Traders in Financial Markets}, + journal = {Journal of Business}, + volume = {64}, + number = {1}, + pages = {1--19}, + year = {1991} +} + +@article{Blume_Easley1992, + author = {Blume, Lawrence and Easley, David}, + title = {Evolution and Market Behavior}, + journal = {Journal of Economic Theory}, + volume = {58}, + number = {1}, + pages = {9--40}, + year = {1992} +} + +@article{Yan2008, + author = {Yan, Hongjun}, + title = {Natural Selection in Financial Markets: Does It Work?}, + journal = {Management Science}, + volume = {54}, + number = {11}, + pages = {1935--1950}, + year = {2008} +} + +@article{Kogan_etal2006, + author = {Kogan, Leonid and Ross, Stephen A. and Wang, Jiang and Westerfield, Mark M.}, + title = {The Price Impact and Survival of Irrational Traders}, + journal = {Journal of Finance}, + volume = {61}, + number = {1}, + pages = {195--229}, + year = {2006} +} + +@article{Kogan_etal2017, + author = {Kogan, Leonid and Ross, Stephen A. 
and Wang, Jiang and Westerfield, Mark M.}, + title = {Market Selection}, + journal = {Journal of Economic Theory}, + volume = {168}, + pages = {209--236}, + year = {2017} +} + +@article{Harrison_Kreps1979, + author = {Harrison, J. Michael and Kreps, David M.}, + title = {Martingales and Arbitrage in Multiperiod Securities Markets}, + journal = {Journal of Economic Theory}, + volume = {20}, + number = {3}, + pages = {381--408}, + year = {1979} +} + +@article{Kreps_Porteus1978, + author = {Kreps, David M. and Porteus, Evan L.}, + title = {Temporal Resolution of Uncertainty and Dynamic Choice Theory}, + journal = {Econometrica}, + volume = {46}, + number = {1}, + pages = {185--200}, + year = {1978} +} + +@article{Lucas_Stokey1984, + author = {Lucas, Robert E. and Stokey, Nancy L.}, + title = {Optimal Growth with Many Consumers}, + journal = {Journal of Economic Theory}, + volume = {32}, + number = {1}, + pages = {139--171}, + year = {1984} +} + +@book{Karlin_Taylor1981, + author = {Karlin, Samuel and Taylor, Howard M.}, + title = {A Second Course in Stochastic Processes}, + publisher = {Academic Press}, + year = {1981} +} + +@article{Bansal_Yaron2004, + author = {Bansal, Ravi and Yaron, Amir}, + title = {Risks for the Long Run: A Potential Resolution of Asset Pricing Puzzles}, + journal = {Journal of Finance}, + volume = {59}, + number = {4}, + pages = {1481--1509}, + year = {2004} +} + +@article{Brunnermeier_etal2014, + author = {Brunnermeier, Markus K. 
and Simsek, Alp and Xiong, Wei}, + title = {A Welfare Criterion for Models with Distorted Beliefs}, + journal = {Quarterly Journal of Economics}, + volume = {129}, + number = {4}, + pages = {1753--1797}, + year = {2014} +} + +@article{Feller1952, + author = {Feller, William}, + title = {The Parabolic Differential Equations and the Associated Semi-Groups of Transformations}, + journal = {Annals of Mathematics}, + volume = {55}, + number = {3}, + pages = {468--519}, + year = {1952} +} + +@article{Geoffard1996, + author = {Geoffard, Pierre-Yves}, + title = {Discounting and Optimizing: Capital Accumulation Problems as Variational Minmax Problems}, + journal = {Journal of Economic Theory}, + volume = {69}, + number = {1}, + pages = {53--70}, + year = {1996} +} + +@article{Garleanu_Panageas2015, + author = {Gârleanu, Nicolae and Panageas, Stavros}, + title = {Young, Old, Conservative, and Bold: The Implications of Heterogeneity and Finite Lives for Asset Pricing}, + journal = {Journal of Political Economy}, + volume = {123}, + number = {3}, + pages = {670--685}, + year = {2015} +} + +@article{Negishi1960, + author = {Negishi, Takashi}, + title = {Welfare Economics and Existence of an Equilibrium for a Competitive Economy}, + journal = {Metroeconomica}, + volume = {12}, + number = {2--3}, + pages = {92--97}, + year = {1960} +} + + ### QuantEcon Bibliography File used in conjuction with sphinxcontrib-bibtex package Note: Extended Information (like abstracts, doi, url's etc.) 
can be found in quant-econ-extendedinfo.bib file in _static/ diff --git a/lectures/_toc.yml b/lectures/_toc.yml index b15de98f3..28999d83f 100644 --- a/lectures/_toc.yml +++ b/lectures/_toc.yml @@ -46,6 +46,7 @@ parts: - file: mix_model - file: navy_captain - file: merging_of_opinions + - file: survival_recursive_preferences - caption: Linear Programming numbered: true chapters: diff --git a/lectures/survival_recursive_preferences.md b/lectures/survival_recursive_preferences.md new file mode 100644 index 000000000..5a18afb4e --- /dev/null +++ b/lectures/survival_recursive_preferences.md @@ -0,0 +1,1153 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.11.1 +kernelspec: + display_name: Python 3 + language: python + name: python3 +--- + +# Survival and Long-Run Dynamics under Recursive Preferences + +```{index} single: Survival; Recursive Preferences +``` + +## Overview + +This lecture describes a theory of **long-run survival** of agents with heterogeneous beliefs +developed by {cite}`Borovicka2020`. + +The classical **market selection hypothesis** asserts that agents with incorrect beliefs +will be driven from the market in the long run --- they will lose all of their wealth +to agents with more accurate beliefs. + +This result was established rigorously by {cite}`Sandroni2000` and {cite}`Blume_Easley2006` +for economies in which agents have **separable** (CRRA) preferences. + +{cite}`Borovicka2020` shows that when agents have **recursive preferences** +of the {cite}`Epstein_Zin1989` type, the market selection hypothesis can fail: +agents with incorrect beliefs can survive and even prosper in the long run. + +The key insight is that recursive preferences **disentangle** risk aversion from the +intertemporal elasticity of substitution (IES), and this separation opens new channels +through which agents with incorrect beliefs can accumulate wealth. + +Three survival channels emerge: + +1. 
**Risk premium channel**: a more optimistic agent earns a higher expected logarithmic + return on her portfolio by holding a larger share of risky assets +2. **Speculative volatility channel**: speculative portfolio positions generate volatile + returns that penalize survival through a Jensen's inequality effect +3. **Saving channel**: under high IES, an agent who believes her portfolio has a high + expected return responds by saving more, which can help her outsave extinction + +Under separable CRRA preferences, only the first two channels operate, and they ensure +that the agent with more accurate beliefs always dominates. +With recursive preferences, the saving channel can tip the balance in favor of an agent +whose beliefs are less accurate. + +```{note} +The paper builds on the continuous-time recursive utility formulation of {cite}`Duffie_Epstein1992a`, +using the planner's problem approach of {cite}`Dumas_Uppal_Wang2000`. +Important foundations for the market selection hypothesis were laid by +{cite}`DeLong_etal1991` and {cite}`Blume_Easley1992`. +``` + +Let's start with some imports: + +```{code-cell} ipython3 +import numpy as np +import matplotlib.pyplot as plt +from scipy.integrate import solve_bvp +from scipy.optimize import brentq +``` + +## Environment + +The economy is populated by two types of infinitely lived agents, $n \in \{1, 2\}$, +who have identical recursive preferences but **differ in their beliefs** about the +distribution of future aggregate endowment. + +### Aggregate endowment + +Aggregate endowment $Y$ follows a geometric Brownian motion under the true probability +measure $P$: + +$$ +d \log Y_t = \mu_Y dt + \sigma_Y dW_t, \quad Y_0 > 0 +$$ (eq:endowment) + +where $W$ is a standard Brownian motion, $\mu_Y$ is the drift, and $\sigma_Y > 0$ is the +volatility. + +### Heterogeneous beliefs + +Agent $n$ perceives the drift of aggregate endowment to be $\mu_Y + \omega^n \sigma_Y$ instead +of $\mu_Y$. 
+ +The parameter $\omega^n$ captures the degree of **optimism** ($\omega^n > 0$) or **pessimism** +($\omega^n < 0$) of agent $n$. + +Formally, agent $n$'s subjective probability measure $Q^n$ is defined by the +Radon-Nikodým derivative + +$$ +M_t^n = \frac{dQ^n}{dP}\bigg|_t = \exp\left(-\frac{1}{2} |\omega^n|^2 t + \omega^n W_t\right) +$$ (eq:radon_nikodym) + +Under her own measure $Q^n$, agent $n$ believes that $W_t^n = W_t - \omega^n t$ is a +Brownian motion, so that + +$$ +d \log Y_t = (\mu_Y + \omega^n \sigma_Y) dt + \sigma_Y dW_t^n +$$ + +Agent $n$ with $\omega^n > 0$ is **optimistic** about the growth rate of aggregate endowment; +agent $n$ with $\omega^n < 0$ is **pessimistic**. + +### Recursive preferences + +Both agents have Duffie-Epstein-Zin recursive preferences characterized by three +parameters: + +* $\gamma > 0$: coefficient of relative risk aversion (CRRA) +* $\rho^{-1} > 0$: intertemporal elasticity of substitution (IES) +* $\beta > 0$: time-preference rate + +The felicity function for these preferences is + +$$ +F(C, \nu) = \beta \frac{C^{1-\gamma}}{1-\gamma} \cdot \frac{(1-\gamma) - (1-\rho)\nu / \beta}{\rho - \gamma} +$$ (eq:felicity) + +where $\nu$ is the endogenous discount rate. + +```{note} +When $\gamma = \rho$, preferences reduce to the standard separable CRRA case. +The disentanglement of risk aversion $\gamma$ from the inverse IES $\rho$ is the key +feature that drives the new survival results. +``` + +## Planner's Problem + +Following {cite}`Dumas_Uppal_Wang2000`, we study optimal allocations using a social +planner who maximizes a weighted average of the two agents' continuation values. + +The planner assigns consumption shares $z^1$ and $z^2 = 1 - z^1$ to the two agents +and chooses discount rate processes $\nu^n$ for each agent. 
+ +### Modified discount factors + +It is convenient to incorporate the belief distortions into modified discount factor +processes $\tilde{\lambda}^n = \lambda^n M^n$, where $\lambda^n$ is the standard discount factor. + +The modified discount factor evolves as + +$$ +d \log \tilde{\lambda}_t^n = -\left(\nu_t^n + \frac{1}{2}(\omega^n)^2\right) dt + \omega^n dW_t +$$ (eq:modified_discount) + +### State variable: Pareto share + +The key state variable is the **Pareto share** of agent 1: + +$$ +\upsilon = \frac{\tilde{\lambda}^1}{\tilde{\lambda}^1 + \tilde{\lambda}^2} \in (0, 1) +$$ (eq:pareto_share) + +This single scalar captures the relative weight of agent 1 in the planner's allocation. + +The dynamics of the log-odds ratio $\vartheta = \log(\upsilon / (1-\upsilon))$ are + +$$ +d\vartheta_t = \underbrace{\left[\nu_t^2 + \frac{1}{2}(\omega^2)^2 - \nu_t^1 - \frac{1}{2}(\omega^1)^2\right]}_{m_{\vartheta}(\upsilon_t)} dt + (\omega^1 - \omega^2) dW_t +$$ (eq:log_odds) + +The drift $m_\vartheta(\upsilon)$ determines the long-run behavior of the Pareto share. + +### HJB equation + +The planner's value function takes the form +$J(\tilde{\lambda}_t, Y_t) = (\tilde{\lambda}_t^1 + \tilde{\lambda}_t^2) Y_t^{1-\gamma} \tilde{J}(\upsilon_t)$, +where $\tilde{J}(\upsilon)$ solves a nonlinear ODE: + +$$ +0 = \sup_{(z^1,z^2,\nu^1,\nu^2)} \left\{ \upsilon F(z^1, \nu^1) + (1-\upsilon) F(z^2, \nu^2) + \mathcal{L} \tilde{J}(\upsilon) \right\} +$$ (eq:hjb) + +subject to $z^1 + z^2 \leq 1$, where $\mathcal{L}$ is a second-order differential operator +that captures the drift and diffusion of the state variables. + +The boundary conditions are $\tilde{J}(0) = V^2$ and $\tilde{J}(1) = V^1$, where $V^n$ is the +value in a homogeneous economy populated only by agent $n$. + + +## Survival Conditions + +The central result of the paper characterizes survival in terms of the boundary behavior +of the drift $m_\vartheta(\upsilon)$. 
+ +```{prf:proposition} +:label: survival_conditions + +Define the following repelling conditions (i) and (ii) and their attracting +counterparts (i') and (ii'): + +$$ +\text{(i)} \lim_{\upsilon \searrow 0} m_\vartheta(\upsilon) > 0, \qquad +\text{(i')} \lim_{\upsilon \searrow 0} m_\vartheta(\upsilon) < 0 +$$ + +$$ +\text{(ii)} \lim_{\upsilon \nearrow 1} m_\vartheta(\upsilon) < 0, \qquad +\text{(ii')} \lim_{\upsilon \nearrow 1} m_\vartheta(\upsilon) > 0 +$$ + +Then: + +**(a)** If (i) and (ii) hold, both agents survive under $P$. + +**(b)** If (i) and (ii') hold, agent 1 dominates in the long run under $P$. + +**(c)** If (i') and (ii) hold, agent 2 dominates in the long run under $P$. + +**(d)** If (i') and (ii') hold, each agent dominates with strictly positive probability. +``` + +The proof uses the Feller classification of boundary behavior for diffusion processes, +as discussed in {cite}`Karlin_Taylor1981`. + +The intuition is straightforward: condition (i) says that when agent 1's share is +nearly zero, there is a force pushing it back up; condition (ii) says that when agent 1's +share is nearly one, there is a force pushing it back down. +When both forces are present, the Pareto share is recurrent and both agents survive. + +## Wealth Dynamics Decomposition + +The survival conditions can be expressed in terms of equilibrium wealth dynamics. +When agent 1 becomes negligible ($\upsilon \searrow 0$), equilibrium prices converge to +those in a homogeneous economy populated by agent 2. 
def portfolio_return_diff(omega1, omega2, gamma, sigma_y):
    """
    Difference in expected log portfolio returns between agents 1 and 2
    at the boundary v -> 0, where agent 1 is negligible.

    Parameters
    ----------
    omega1 : float
        Belief distortion of agent 1.
    omega2 : float
        Belief distortion of agent 2 (the dominant agent at this boundary).
    gamma : float
        Coefficient of relative risk aversion.
    sigma_y : float
        Endowment growth volatility.

    Returns
    -------
    float
        Total return difference: risk-premium effect minus volatility
        penalty, as a single scalar (not a tuple).
    """
    delta_omega = omega1 - omega2
    # Portfolio-share difference (Δω/γ) times σ_Y times the risk premium
    # (γσ_Y − ω²); algebraically equal to the first term of
    # eq:portfolio_returns after factoring out σ_Y.
    diff_portfolios = delta_omega / gamma
    rp = gamma * sigma_y - omega2
    # NOTE(review): eq:portfolio_returns in the text displays the penalty
    # as (1/2)(Δω/γ)^2, whereas the implementation (here and, consistently,
    # in decompose_survival) uses (1/2)(Δω σ_Y/γ + Δω)^2 — confirm against
    # the paper before changing either.
    volatility_penalty = 0.5 * (delta_omega * sigma_y / gamma
                                + delta_omega)**2
    return diff_portfolios * sigma_y * rp - volatility_penalty


def consumption_rate_diff(omega1, omega2, gamma, rho, sigma_y):
    """
    Difference in consumption rates (y^2)^{-1} - (y^1)^{-1} at the
    boundary v -> 0 (eq:consumption_rates).

    Parameters
    ----------
    omega1, omega2 : float
        Belief distortions of agents 1 and 2.
    gamma : float
        Risk aversion.
    rho : float
        Inverse of the IES; the factor (1 - rho)/rho flips sign as the
        IES crosses one, which is the saving channel.
    sigma_y : float
        Endowment volatility.

    Returns
    -------
    float
    """
    delta_omega = omega1 - omega2
    # Agent 1's perceived excess return on her own portfolio.
    subjective_return_diff = (delta_omega * sigma_y
                              + delta_omega**2 / (2 * gamma))
    return (1 - rho) / rho * subjective_return_diff


def survival_drift(omega1, omega2, gamma, rho, sigma_y):
    """
    Drift m_ϑ of the log-odds Pareto-share process at the boundary v -> 0.

    Positive drift means the boundary repels, i.e. agent 1 survives.

    Parameters
    ----------
    omega1, omega2 : float
        Belief distortions of agents 1 and 2.
    gamma : float
        Risk aversion.
    rho : float
        Inverse of the IES.
    sigma_y : float
        Endowment volatility.

    Returns
    -------
    float
        Drift at v = 0.
    """
    pr = portfolio_return_diff(omega1, omega2, gamma, sigma_y)
    cr = consumption_rate_diff(omega1, omega2, gamma, rho, sigma_y)
    return gamma * (pr + cr)


def compute_survival_boundary(omega1, omega2, sigma_y,
                              gamma_range, boundary='lower'):
    """
    Compute the curve in (γ, ρ) space along which a survival condition
    holds with equality.

    For ``boundary='lower'`` (v -> 0) the condition is for agent 1's
    survival; any other value selects the v -> 1 condition for agent 2,
    which is the same formula with the two agents' roles swapped — so a
    single loop handles both cases.

    Parameters
    ----------
    omega1, omega2 : float
        Belief distortions of agents 1 and 2.
    sigma_y : float
        Endowment volatility.
    gamma_range : iterable of float
        Risk-aversion values at which to evaluate the boundary.
    boundary : str, optional
        'lower' for the v -> 0 condition, anything else for v -> 1.

    Returns
    -------
    numpy.ndarray
        ρ along the boundary for each γ; np.nan where the condition is
        degenerate.
    """
    if boundary == 'lower':
        o_self, o_other = omega1, omega2
    else:
        o_self, o_other = omega2, omega1

    delta_omega = o_self - o_other
    rho_boundary = []

    for gamma in gamma_range:
        pr = portfolio_return_diff(o_self, o_other, gamma, sigma_y)
        subj_ret = (delta_omega * sigma_y
                    + delta_omega**2 / (2 * gamma))
        if abs(subj_ret) < 1e-15:
            rho_boundary.append(np.nan)
            continue
        # Solve pr + (1-ρ)/ρ · subj_ret = 0 for ρ:
        #   ρ(pr − subj_ret) = −subj_ret  ⇒  ρ = subj_ret / (subj_ret − pr)
        denom = subj_ret - pr
        if abs(denom) < 1e-15:
            rho_boundary.append(np.nan)
        else:
            rho_boundary.append(subj_ret / denom)

    return np.array(rho_boundary)
def decompose_survival(omega1, omega2, gamma_vals, rho, sigma_y):
    """
    Decompose the survival drift at the boundary v -> 0 into its three
    channels, evaluated at each risk-aversion level in ``gamma_vals``.

    Fully vectorized over ``gamma_vals`` (no Python loop); accepts any
    array-like and always returns float arrays, which also avoids the
    silent integer truncation that ``np.zeros_like`` would cause for an
    integer-dtype input.

    Parameters
    ----------
    omega1, omega2 : float
        Belief distortions of agents 1 and 2.
    gamma_vals : array_like
        Risk-aversion values.
    rho : float
        Inverse of the IES.
    sigma_y : float
        Endowment volatility.

    Returns
    -------
    tuple of numpy.ndarray
        ``(risk_premium_ch, vol_penalty_ch, saving_ch, total)``, each
        with the shape of ``gamma_vals``; ``total`` is the elementwise
        sum of the three channels.
    """
    gamma_vals = np.asarray(gamma_vals, dtype=float)
    delta_omega = omega1 - omega2

    # Channel 1: portfolio-share difference (Δω/γ) × σ_Y × risk premium
    # (γσ_Y − ω²).
    risk_premium_ch = (delta_omega / gamma_vals * sigma_y
                       * (gamma_vals * sigma_y - omega2))

    # Channel 2: volatility penalty from speculative positions (≤ 0).
    vol_penalty_ch = -0.5 * (delta_omega * sigma_y / gamma_vals
                             + delta_omega)**2

    # Channel 3: saving response to the perceived excess return; its
    # sign flips with (1 − ρ), i.e. with the IES above or below one.
    saving_ch = (1 - rho) / rho * (delta_omega * sigma_y
                                   + delta_omega**2 / (2 * gamma_vals))

    total = risk_premium_ch + vol_penalty_ch + saving_ch
    return risk_premium_ch, vol_penalty_ch, saving_ch, total
volatile returns that hurt the incorrect agent + +* The **risk premium channel** (blue) increases with risk aversion --- the more optimistic + agent earns a higher return by holding more of the risky asset + +* The **saving channel** (green) provides a constant positive lift when IES $> 1$ --- + the optimistic agent saves more in response to her perceived high returns + + +## Varying IES + +The intertemporal elasticity of substitution plays a critical role in survival outcomes. + +```{code-cell} ipython3 +fig, axes = plt.subplots(1, 3, figsize=(18, 5)) + +gamma_fixed = 10.0 +omega1 = 0.25 +omega2 = 0.0 +sigma_y = 0.02 + +ies_values = [0.5, 1.0, 1.5] +ies_labels = ['IES = 0.5 (inelastic)', 'IES = 1.0 (log)', + 'IES = 1.5 (elastic)'] + +gamma_range = np.linspace(0.5, 25, 300) + +for idx, (ies, label) in enumerate(zip(ies_values, ies_labels)): + rho = 1.0 / ies + + rp, vp, sc, total = decompose_survival(omega1, omega2, + gamma_range, + rho, sigma_y) + + ax = axes[idx] + ax.plot(gamma_range, rp, 'b-', linewidth=2, + label='Risk premium') + ax.plot(gamma_range, vp, 'r--', linewidth=2, + label='Volatility penalty') + ax.plot(gamma_range, sc, 'g-.', linewidth=2, + label='Saving channel') + ax.plot(gamma_range, total, 'k-', linewidth=3, + label='Total') + ax.axhline(0, color='gray', linewidth=0.5) + ax.set_xlabel(r'Risk aversion $\gamma$', fontsize=12) + ax.set_ylabel('Contribution', fontsize=12) + ax.set_title(label, fontsize=13) + ax.legend(fontsize=9) + +plt.tight_layout() +plt.show() +``` + +Key insights: + +* When **IES $= 1$** (center), the saving channel vanishes: consumption-wealth ratios + are constant and equal to $\beta$, as in the logarithmic case. + Only the risk premium and volatility channels matter. + +* When **IES $> 1$** (right), the saving channel is positive for the optimistic agent: + she perceives high expected returns and responds by saving more, helping her survive. 
+ +* When **IES $< 1$** (left), the saving channel reverses direction: higher perceived + returns lead to *lower* saving (the income effect dominates), hurting the + optimistic agent's survival. + + +## Asymptotic Results + +{cite}`Borovicka2020` establishes four key asymptotic results: + +**(a) Near risk neutrality** ($\gamma \searrow 0$): each agent dominates with strictly positive +probability. +Low risk aversion encourages speculative portfolio positions. +The volatile returns create a diverging force --- one agent must eventually become +extinct, but which one depends on the realized path. + +**(b) High risk aversion** ($\gamma \nearrow \infty$): the relatively more optimistic agent +always dominates. +The risk premium channel dominates, and the pessimistic agent pays too high a price +for insurance. + +**(c) High IES** ($\rho \searrow 0$): the relatively more optimistic agent always survives. +The saving channel is strong enough to prevent her extinction. +Whether the pessimistic agent also survives depends on risk aversion. + +**(d) Low IES** ($\rho \nearrow \infty$): a nondegenerate long-run equilibrium cannot exist. +Inelastic preferences cause the saving channel to work against survival of the +small agent, regardless of identity. 
def simulate_crra_pareto(omega1, omega2, T, dt, n_paths, seed=42):
    """
    Simulate paths of agent 1's Pareto share under separable CRRA
    preferences (gamma = rho).

    Under CRRA the log-odds process log(v/(1-v)) is a Brownian motion
    whose constant drift is the difference of the two agents' relative
    entropies, 0.5*(omega2**2 - omega1**2).

    Parameters
    ----------
    omega1, omega2 : float
        Belief distortions of the two agents.
    T : float
        Time horizon.
    dt : float
        Time step.
    n_paths : int
        Number of simulated sample paths.
    seed : int, optional
        Seed for the random number generator.

    Returns
    -------
    t_grid : numpy.ndarray, shape (n_steps + 1,)
        Time grid from 0 to T.
    v_paths : numpy.ndarray, shape (n_paths, n_steps + 1)
        Pareto-share paths, each starting at 0.5.
    """
    rng = np.random.default_rng(seed)
    n_steps = int(T / dt)
    t_grid = np.linspace(0, T, n_steps + 1)

    # Constant drift and volatility of the log-odds ratio.
    mu_theta = 0.5 * (omega2**2 - omega1**2)
    sig_theta = omega1 - omega2

    # Log-odds start at 0, i.e. an initial share of one half.
    log_odds = np.zeros((n_paths, n_steps + 1))
    increments = rng.normal(0, np.sqrt(dt), (n_paths, n_steps))

    for step in range(n_steps):
        log_odds[:, step + 1] = (log_odds[:, step]
                                 + mu_theta * dt
                                 + sig_theta * increments[:, step])

    # Map log-odds back to shares through the logistic function.
    return t_grid, 1.0 / (1.0 + np.exp(-log_odds))
+ +The survival results do not depend on $\mu_Y$ or $\sigma_Y$ independently but only on +the ratio $\omega^n / \sigma_Y$. +The limit $\sigma_Y \to 0$ with $\omega^1 \neq 0$ thus isolates the saving channel. + +In this case: + +* The risk premium is zero (no aggregate risk) +* The speculative volatility channel is present but muted at high risk aversion +* The saving channel alone can generate survival of the incorrect agent when IES $> 1$ + +```{code-cell} ipython3 +# Show survival regions for the limiting case ω/σ_y → ∞ +# (equivalent to σ_y → 0 or ω → ∞) + +fig, ax = plt.subplots(figsize=(10, 8)) + +gamma_range = np.linspace(0.1, 30, 500) + +# In the limit, survival of agent 1 requires IES > 1 +# i.e., ρ < 1 +ax.axhline(1.0, color='b', linestyle='--', linewidth=2, + label=r'Agent 1 survival: $\rho < 1$ (IES $> 1$)') + +# Agent 2 always survives (correct beliefs, no risk premium cost) +# The boundary is the CRRA line +ax.plot(gamma_range, gamma_range, 'k:', linewidth=1, + label=r'CRRA ($\gamma = \rho$)') + +# Shade coexistence region +ax.fill_between(gamma_range, 0, np.minimum(1.0, gamma_range), + alpha=0.2, color='green', + label='Both agents survive') +ax.fill_between(gamma_range, np.minimum(1.0, gamma_range), + np.ones_like(gamma_range), + where=gamma_range > 1, + alpha=0.2, color='blue', + label='Both survive (above CRRA, below ρ=1)') + +ax.set_xlabel(r'Risk aversion $\gamma$', fontsize=13) +ax.set_ylabel(r'Inverse of IES $\rho$', fontsize=13) +ax.set_title( + r'Survival regions: $\sigma_Y \to 0$ (pure speculation)', + fontsize=13) +ax.set_xlim(0, 30) +ax.set_ylim(0, 10) +ax.legend(fontsize=10, loc='upper right') +plt.tight_layout() +plt.show() +``` + +In the economy without aggregate risk, IES $> 1$ is sufficient for the incorrect agent +to survive when risk aversion is sufficiently high. +This is the pure saving channel at work. 
+ + +## Asset Pricing Implications + +{cite}`Borovicka2020` also shows that as the Pareto share of one agent becomes negligible, +current asset prices converge to those in a homogeneous economy populated by the +large agent. + +### Prices at the boundary + +As $\upsilon \searrow 0$ (agent 2 dominates): + +**Risk-free rate:** + +$$ +\lim_{\upsilon \searrow 0} r(\upsilon) = \beta + \rho \mu_Y + \omega^2 \sigma_Y ++ \frac{1}{2}(1-\gamma)\sigma_Y^2 - \frac{1}{2}\gamma \sigma_Y^2 +$$ (eq:riskfree) + +**Wealth-consumption ratio:** + +$$ +\lim_{\upsilon \searrow 0} y(\upsilon) = \left[\beta - (1-\rho)\left(\mu_Y ++ \omega^2 \sigma_Y + \frac{1}{2}(1-\gamma)\sigma_Y^2\right)\right]^{-1} +$$ (eq:wc_ratio) + +### Portfolio choice of the negligible agent + +The small agent's portfolio share in the risky asset converges to + +$$ +\lim_{\upsilon \searrow 0} \pi^1(\upsilon) = 1 + \frac{\omega^1 - \omega^2}{\gamma \sigma_Y} +$$ (eq:portfolio) + +An optimistic agent ($\omega^1 > \omega^2$) holds a **leveraged** position ($\pi^1 > 1$). + +A pessimistic agent ($\omega^1 < \omega^2$) **shorts** the risky asset when +$\omega^1 - \omega^2 < -\gamma \sigma_Y$. 
+ +```{code-cell} ipython3 +# Portfolio share of agent 1 as function of belief distortion +fig, ax = plt.subplots(figsize=(10, 6)) + +omega2 = 0.0 +sigma_y = 0.02 + +omega1_range = np.linspace(-0.5, 1.0, 300) + +for gamma in [2, 5, 10, 20]: + pi1 = 1 + (omega1_range - omega2) / (gamma * sigma_y) + ax.plot(omega1_range, pi1, linewidth=2, + label=rf'$\gamma = {gamma}$') + +ax.axhline(1.0, color='gray', linestyle=':', alpha=0.5) +ax.axhline(0.0, color='gray', linestyle=':', alpha=0.5) +ax.axvline(0.0, color='gray', linestyle=':', alpha=0.5) + +ax.set_xlabel(r'Belief distortion $\omega^1$', fontsize=13) +ax.set_ylabel(r'Portfolio share $\pi^1$', fontsize=13) +ax.set_title( + 'Portfolio share of negligible agent in risky asset', + fontsize=13) +ax.legend(fontsize=11) +plt.tight_layout() +plt.show() +``` + +Key observations: + +* At $\omega^1 = 0$ (correct beliefs), the agent holds the market portfolio ($\pi^1 = 1$) + +* Higher risk aversion reduces the speculative position toward the market portfolio + +* A pessimistic agent with low risk aversion may take a large short position, + generating the volatile returns needed for the saving channel to operate + + +## Optimistic versus Pessimistic Distortions + +A striking feature of the model is that optimistic and pessimistic belief distortions +have **asymmetric** effects on survival. + +An optimistic agent ($\omega^1 > 0$) benefits from both the risk premium channel +(she holds more of the risky asset and earns the risk premium) and the saving +channel (she perceives high returns and saves more under IES $> 1$). + +A pessimistic agent ($\omega^1 < 0$) is disadvantaged by the risk premium channel +(she holds less risky asset and foregoes the premium). +However, she can potentially survive through the saving channel if she shorts the +risky asset aggressively enough to perceive a high expected return on her own portfolio. 
+ +```{code-cell} ipython3 +fig, axes = plt.subplots(1, 2, figsize=(14, 6)) + +sigma_y = 0.02 +omega2 = 0.0 +rho = 0.67 # IES = 1.5 +gamma_range = np.linspace(0.5, 25, 300) + +# Optimistic agent +ax = axes[0] +for omega1 in [0.1, 0.25, 0.5, 1.0]: + rp, vp, sc, total = decompose_survival(omega1, omega2, + gamma_range, + rho, sigma_y) + ax.plot(gamma_range, total, linewidth=2, + label=rf'$\omega^1 = {omega1}$') + +ax.axhline(0, color='gray', linewidth=0.5) +ax.set_xlabel(r'Risk aversion $\gamma$', fontsize=12) +ax.set_ylabel('Total survival drift', fontsize=12) +ax.set_title('Optimistic agent 1', fontsize=13) +ax.legend(fontsize=10) + +# Pessimistic agent +ax = axes[1] +for omega1 in [-0.1, -0.25, -0.5, -1.0]: + rp, vp, sc, total = decompose_survival(omega1, omega2, + gamma_range, + rho, sigma_y) + ax.plot(gamma_range, total, linewidth=2, + label=rf'$\omega^1 = {omega1}$') + +ax.axhline(0, color='gray', linewidth=0.5) +ax.set_xlabel(r'Risk aversion $\gamma$', fontsize=12) +ax.set_ylabel('Total survival drift', fontsize=12) +ax.set_title('Pessimistic agent 1', fontsize=13) +ax.legend(fontsize=10) + +plt.tight_layout() +plt.show() +``` + +For the optimistic agent (left), survival drift turns positive at moderate risk +aversion and stays positive. + +For the pessimistic agent (right), survival drift is negative for high risk aversion +and becomes positive only at intermediate risk aversion levels --- and only when the +belief distortion is large enough to induce an aggressive short position. + + + +## Long-Run Consumption Distribution + +When both agents survive, the stationary distribution of consumption shares provides +information about the typical wealth allocation. + +{cite}`Borovicka2020` shows that when agent $n$ survives, she attains an +arbitrarily large consumption share $z^n \in (0, 1)$ with probability one at some +future date. + +Let us simulate the Pareto share dynamics in a simplified model to illustrate +the ergodic behavior. 
def simulate_pareto_share(omega1, omega2, gamma, rho, sigma_y,
                          beta, T, dt, n_paths=20, seed=42):
    """
    Simulate Pareto-share paths with a state-dependent drift obtained by
    linearly interpolating the two boundary survival drifts.

    This is a simplified approximation: the true log-odds drift varies
    with the state, and here it is replaced by a linear interpolation
    between its limits at v = 0 and v = 1 computed from the boundary
    formulas via ``survival_drift``.

    Parameters
    ----------
    omega1, omega2 : float
        Belief distortions of the two agents.
    gamma, rho : float
        Risk aversion and inverse IES.
    sigma_y : float
        Endowment volatility.
    beta : float
        Time-preference rate.  NOTE(review): accepted for interface
        completeness but not used by this approximation — confirm
        whether it should enter the interpolated drift.
    T : float
        Time horizon.
    dt : float
        Time step.
    n_paths : int, optional
        Number of sample paths.
    seed : int, optional
        Random seed.

    Returns
    -------
    t_grid : numpy.ndarray, shape (n_steps + 1,)
    v_paths : numpy.ndarray, shape (n_paths, n_steps + 1)
        Pareto-share paths, each starting at 0.5.
    """
    rng = np.random.default_rng(seed)
    n_steps = int(T / dt)
    t_grid = np.linspace(0, T, n_steps + 1)

    sig_theta = omega1 - omega2

    # Boundary drifts of the log-odds process: at v -> 0 agent 1 is
    # negligible; at v -> 1 the roles are swapped, hence the sign flip.
    drift_lo = survival_drift(omega1, omega2, gamma, rho, sigma_y)
    drift_hi = -survival_drift(omega2, omega1, gamma, rho, sigma_y)

    log_odds = np.zeros((n_paths, n_steps + 1))
    increments = rng.normal(0, np.sqrt(dt), (n_paths, n_steps))

    for step in range(n_steps):
        share = 1.0 / (1.0 + np.exp(-log_odds[:, step]))
        # Linear interpolation of the drift between the two boundaries.
        drift = drift_lo * (1 - share) + drift_hi * share
        log_odds[:, step + 1] = (log_odds[:, step]
                                 + drift * dt
                                 + sig_theta * increments[:, step])

    return t_grid, 1.0 / (1.0 + np.exp(-log_odds))
Pessimistic agents can survive only through aggressive shorting + combined with high IES. + +5. **Price impact**: A surviving agent with currently negligible wealth has no impact on + current prices, but will affect prices in the future when her wealth share recovers. + +These results have important implications for asset pricing. +Models that feature agents with heterogeneous beliefs and recursive preferences can +generate persistent heterogeneity and endogenous fluctuations in the wealth +distribution, enriching the dynamics of equilibrium asset prices, risk premia, +and trading volume. From 53c7329497788a93838e3ec3e3ee959c07207aba Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Thu, 2 Apr 2026 14:07:43 +1100 Subject: [PATCH 08/20] updates --- lectures/merging_of_opinions.md | 524 +++++++++++++++++--------------- 1 file changed, 273 insertions(+), 251 deletions(-) diff --git a/lectures/merging_of_opinions.md b/lectures/merging_of_opinions.md index ece4a567b..e6905934c 100644 --- a/lectures/merging_of_opinions.md +++ b/lectures/merging_of_opinions.md @@ -67,22 +67,25 @@ from scipy.special import betaln ### The sequence space and its filtration Let $(S, \mathscr{S})$ be a measurable space, called the signal space. + Set $\Omega = S^{\mathbb{N}}$, the set of all infinite sequences $\omega = (x_1, x_2, \ldots)$ with $x_n \in S$, equipped with the product $\sigma$-algebra $\mathscr{F} = \mathscr{S}^{\otimes \mathbb{N}}$. -For each $n \geq 1$, define the **finite-horizon** $\sigma$-algebra +For each $n \geq 1$, define the *finite-horizon* $\sigma$-algebra $$ \mathscr{F}_n = \sigma(x_1, \ldots, x_n), $$ so $\mathscr{F}_1 \subseteq \mathscr{F}_2 \subseteq \cdots \subseteq \mathscr{F}$. + The collection $\{\mathscr{F}_n\}_{n \geq 1}$ is the **natural filtration** generated by the observation process; $\mathscr{F}_n$ encodes everything that can be learned from the first $n$ data points. Let $P$ and $Q$ denote two probability measures on $(\Omega, \mathscr{F})$. 
+ Write $P_n = P|_{\mathscr{F}_n}$ and $Q_n = Q|_{\mathscr{F}_n}$ for their restrictions to the history up to time $n$. @@ -93,14 +96,16 @@ restrictions to the history up to time $n$. $P$ is **absolutely continuous** with respect to $Q$, written $P \ll Q$, if $Q(A) = 0$ implies $P(A) = 0$ for every $A \in \mathscr{F}$. + They are **mutually absolutely continuous**, or **equivalent**, written $P \sim Q$, if both $P \ll Q$ and $Q \ll P$. $P$ is **locally absolutely continuous** with respect to $Q$ if $P_n \ll Q_n$ for every $n \geq 1$. +``` + Global absolute continuity $P \ll Q$ implies local absolute continuity, but not conversely. -``` Mutual absolute continuity means the two agents agree on which events are *possible*. @@ -119,7 +124,7 @@ $$ = \frac{1}{2} \int_E \left|\frac{d\mu}{d\lambda} - \frac{d\nu}{d\lambda}\right| d\lambda, $$ -where $\lambda$ is any dominating measure. +where $\lambda$ is any **dominating measure**, meaning $\mu \ll \lambda$ and $\nu \ll \lambda$ (for example, $\lambda = \mu + \nu$). Equivalently, $\|\mu - \nu\|_{\mathrm{TV}} \in [0,1]$, with 0 meaning $\mu = \nu$ and 1 meaning $\mu \perp \nu$ (mutual singularity). ``` @@ -130,6 +135,32 @@ $$ \|\mu - \nu\|_{\mathrm{TV}} = \mathbb{E}_\nu[(f-1)^+] = 1 - \mathbb{E}_\nu[\min(f,1)]. $$ +```{exercise} +:label: tv_derivation + +Show the identity above. + +*Hint:* Start from $\|\mu - \nu\|_{\mathrm{TV}} = \tfrac{1}{2}\,\mathbb{E}_\nu[|f - 1|]$ (which follows from taking $\nu$ as the dominating measure) and use the fact that $\mathbb{E}_\nu[f] = 1$. +``` + +```{solution} tv_derivation +:class: dropdown + +Since $\mu \ll \nu$, we can use $\nu$ as the dominating measure, so $d\mu/d\nu = f$ and $d\nu/d\nu = 1$, giving + +$$ +\|\mu - \nu\|_{\mathrm{TV}} = \tfrac{1}{2}\,\mathbb{E}_\nu[|f - 1|]. +$$ + +Write $|f-1| = (f-1)^+ + (1-f)^+$. + +Since $\mu$ is a probability measure, $\mathbb{E}_\nu[f] = 1$, so the two parts contribute equally: $\mathbb{E}_\nu[(f-1)^+] = \mathbb{E}_\nu[(1-f)^+]$. 
+ +Therefore $\tfrac{1}{2}\,\mathbb{E}_\nu[|f-1|] = \mathbb{E}_\nu[(f-1)^+]$. + +Next, note that $(f-1)^+ = f - \min(f,1)$, so $\mathbb{E}_\nu[(f-1)^+] = \mathbb{E}_\nu[f] - \mathbb{E}_\nu[\min(f,1)] = 1 - \mathbb{E}_\nu[\min(f,1)]$. +``` + Total variation is one of the strongest standard notions of distance between probability measures. If two measures are close in total variation, then their probabilities of every event are close. @@ -192,8 +223,12 @@ The likelihood-ratio process $\{Z_n\}$ satisfies: 3. $Z_n \to Z_\infty$ in $L^1(Q)$: $\;\mathbb{E}_Q[|Z_n - Z_\infty|] \to 0$. *Proof sketch.* Non-negativity and the martingale property give boundedness -in $L^1(Q)$. Then almost-sure convergence follows from Doob's martingale -convergence theorem {cite:t}`doob1953`. Uniform integrability (which follows +in $L^1(Q)$. + +Then almost-sure convergence follows from Doob's martingale +convergence theorem {cite:t}`doob1953`. + +Uniform integrability (which follows from $Z \in L^1(Q)$ via the conditional Jensen inequality) upgrades this to $L^1(Q)$ convergence. $\square$ ``` @@ -209,17 +244,15 @@ is $$ \frac{d\,P(\,\cdot\,|\,\mathscr{F}_n)}{d\,Q(\,\cdot\,|\,\mathscr{F}_n)} = \frac{Z_\infty}{Z_n} -\qquad Q\text{-a.s.\ on } \{Z_n > 0\}. +\qquad Q\text{-a.s. on } \{Z_n > 0\}. $$ Applying the total-variation formula with $f = Z_\infty / Z_n$ then gives $$ -\boxed{ d_n = \mathbb{E}_{Q(\cdot|\mathscr{F}_n)}\!\left[\left(\frac{Z_\infty}{Z_n} - 1\right)^{\!+}\right] = 1 - \mathbb{E}_{Q(\cdot|\mathscr{F}_n)}\!\left[\min\!\left(\frac{Z_\infty}{Z_n},\,1\right)\right]. -} $$ Multiplying through by $Z_n$ and integrating with respect to $Q$: @@ -237,7 +270,9 @@ So the $L^1$ convergence of the martingale controls how fast the total variation :label: blackwell_dubins Let $P$ and $Q$ be probability measures on $(\Omega, \mathscr{F})$ with -$P \ll Q$. Define +$P \ll Q$. + +Define $$ d_n = \bigl\|P(\,\cdot\,|\,\mathscr{F}_n) - Q(\,\cdot\,|\,\mathscr{F}_n)\bigr\|_{\mathrm{TV}}. 
@@ -246,16 +281,16 @@ $$ Then $d_n \to 0$ almost surely under $Q$ (and hence also under $P$). ``` -### Proof ingredients - The proof has three steps. Step 1. Representation of $d_n$ via $Z_n$. + As shown above, $d_n$ can be written in terms of $Z_\infty / Z_n$. This reduces the problem to a statement about one martingale under $Q$. Step 2. $\{d_n\}$ is a $Q$-supermartingale. + Conditioning on more information reduces distinguishability on average. Formally, because @@ -271,6 +306,7 @@ So $\{d_n, \mathscr{F}_n\}$ is a non-negative $Q$-supermartingale in $[0,1]$. By Doob's theorem, $d_n \to d_\infty$ $Q$-almost surely for some $[0,1]$-valued random variable $d_\infty$. Step 3. The almost-sure limit is zero. + From Step 1 and the $L^1$ bound: $$ @@ -286,11 +322,13 @@ Because $P \ll Q$, the same conclusion also holds $P$-almost surely. $\square$ ```{prf:remark} One-Sided vs. Mutual Absolute Continuity :label: one_sided_vs_mutual -The theorem requires only $P \ll Q$, not $Q \ll P$. -Under one-sided absolute continuity, merging holds $Q$-a.s. (and hence -$P$-a.s.). If additionally $Q \ll P$, that is, if $P \sim Q$, then merging -holds under *both* agents' measures: neither agent has a positive-probability -path on which the other agent's beliefs remain permanently different. +The theorem requires only $P \ll Q$, not $Q \ll P$. + +Since $P \ll Q$ means every $Q$-null set is also $P$-null, the conclusion $d_n \to 0$ $Q$-a.s. automatically implies $d_n \to 0$ $P$-a.s. + +One-sided absolute continuity is therefore enough for merging under both agents' measures. + +Mutual absolute continuity $P \sim Q$ adds symmetry: the proof can also be run with $P$ as the reference measure and $Q$ as the alternative, but the conclusion is the same. ``` ```{prf:remark} Sharpness @@ -341,16 +379,54 @@ agents' initial priors $(\alpha_i, \beta_i)$. 
### The marginal likelihood and likelihood ratio -The marginal probability assigned by agent $i$ to the observed sequence -$x^n$ (with $k$ successes, in any order) is +For each fixed value of $p \in (0,1)$, let $P_p$ denote the IID Bernoulli$(p)$ +probability law on infinite sequences. + +Agent $i$ does not know $p$. +Instead, agent $i$ places the prior density $\pi_i$ on $p$, which induces a +probability measure $P_i$ on data sequences via + +$$ +P_i(A) = \int_0^1 P_p(A)\,\pi_i(p)\,dp +\qquad \text{for every event } A. +$$ + +So $P_i$ is the agent's marginal probability measure over +histories after averaging over uncertainty about $p$. + +In particular, if $x^n$ is an exact observed history with $k$ successes, then +$P_i(x^n)$ means the probability that agent $i$ assigns to that history under +this mixture measure. + +To compute it, start from the Beta density + +$$ +\pi_i(p) += \frac{p^{\alpha_i - 1} (1-p)^{\beta_i - 1}}{B(\alpha_i, \beta_i)}, +\qquad 0 < p < 1. $$ + +Given $p$, the probability of that ordered history is $p^k (1-p)^{n-k}$. + +Therefore + +$$ +\begin{aligned} P_i(x^n) -= \frac{B(\alpha_i + k,\; \beta_i + n - k)}{B(\alpha_i,\, \beta_i)}, +&= \int_0^1 p^k (1-p)^{n-k} \pi_i(p)\, dp \\ +&= \frac{1}{B(\alpha_i, \beta_i)} +\int_0^1 p^{\alpha_i + k - 1} (1-p)^{\beta_i + n - k - 1}\, dp \\ +&= \frac{B(\alpha_i + k,\; \beta_i + n - k)}{B(\alpha_i,\, \beta_i)}. +\end{aligned} $$ where $B(a,b) = \Gamma(a)\Gamma(b)/\Gamma(a+b)$ is the beta function. +This expression is the probability of the ordered history $x^n$. + +It depends on the data only through the count $k$, so histories with the same number of successes receive the same probability. + The likelihood ratio at time $n$ is therefore $$ @@ -375,7 +451,9 @@ processes, where $p$ is drawn from the posterior Beta distribution. 
Since the Bernoulli$(p)^{\infty}$ measures for different $p$ are mutually singular (the empirical frequency identifies $p$ exactly), the TV distance between the two conditional distributions over the future equals the TV -distance between the two posterior distributions over the parameter $p$: +distance between the two posterior distributions over the parameter $p$. + +The TV distance is $$ d_n @@ -385,31 +463,12 @@ $$ As $k_n/n \to p^*$ and $n \to \infty$, both posterior Betas concentrate around $p^*$ with variance of order $1/n$, so $d_n \to 0$. - -## Python: merging in action - -We set up helper functions and then run the main simulation. +The following code implements the Beta–Bernoulli updating, predictive probabilities, TV distance, and likelihood-ratio computations described above. ```{code-cell} ipython3 -# ------------------------------------------------------------------------- -# Helper functions for the Beta-Bernoulli example -# ------------------------------------------------------------------------- - def beta_bernoulli_update(data, a0, b0): """ Sequential Beta-Bernoulli Bayesian updating. - - Parameters - ---------- - data : 1-D array of 0s and 1s - a0, b0 : float - Prior Beta parameters. - - Returns - ------- - a_post, b_post : 1-D arrays of length len(data) + 1 - Posterior parameters after 0, 1, ..., len(data) observations. - Index 0 is the prior. """ n = len(data) cum_k = np.concatenate([[0], np.cumsum(data)]) # cumulative successes @@ -427,7 +486,7 @@ def predictive_prob(a_post, b_post): def tv_distance_beta(a1, b1, a2, b2, n_grid=2000): """ TV distance between Beta(a1,b1) and Beta(a2,b2) via grid quadrature. - Uses a fine grid on (0,1); fast because it is fully vectorised. + Uses a fine grid on (0,1). 
""" x = np.linspace(1e-8, 1 - 1e-8, n_grid) dx = x[1] - x[0] @@ -478,7 +537,7 @@ def run_simulation(p_true, a1, b1, a2, b2, n_steps, seed=0): tv_1step=tv_1step, tv_beta=tv_beta, log_Z=log_Z) ``` -### The main merging figure +### Simulation We choose two agents with very different beliefs about the bias of a coin whose true probability of heads is $p^* = 0.65$. @@ -502,52 +561,45 @@ mystnb: The four panels show posterior predictive means, the total-variation distance $d_n$, the likelihood-ratio martingale, and posterior densities at selected horizons. name: fig-merging-of-opinions-beta-bernoulli --- -# ------------------------------------------------------------------------- -# Simulation parameters -# ------------------------------------------------------------------------- p_true = 0.65 -a1, b1 = 1.0, 8.0 # skeptic: prior mean = 1/9 ≈ 0.11 -a2, b2 = 8.0, 1.0 # optimist: prior mean = 8/9 ≈ 0.89 +a1, b1 = 1.0, 8.0 # skeptic +a2, b2 = 8.0, 1.0 # optimist n_steps = 600 sim = run_simulation(p_true, a1, b1, a2, b2, n_steps, seed=7) steps = np.arange(n_steps + 1) -# ------------------------------------------------------------------------- -# Figure 1: merging of predictive distributions and TV distance -# ------------------------------------------------------------------------- fig, axes = plt.subplots(2, 2, figsize=(11, 7)) - -# --- Panel (a): posterior predictive probabilities --- ax = axes[0, 0] ax.plot(steps, sim['pred1'], color='steelblue', lw=2, label=r'Agent 1 $\hat p_1^n$ (prior: skeptic)') ax.plot(steps, sim['pred2'], color='firebrick', lw=2, label=r'Agent 2 $\hat p_2^n$ (prior: optimist)') -ax.axhline(p_true, color='black', lw=1.0, ls='--', label=f'Truth $p^*={p_true}$') +ax.axhline(p_true, color='black', lw=1.0, ls='--', + label=f'Truth $p^*={p_true}$') ax.set_xlabel('observations $n$') ax.set_ylabel('predictive probability') -ax.text(0.03, 0.93, '(a)', transform=ax.transAxes) +ax.set_title('(a) posterior predictive means') ax.legend(fontsize=8) ax.set_ylim(0, 
1) -# --- Panel (b): TV distance (exact Blackwell-Dubins d_n) --- ax = axes[0, 1] ax.semilogy(steps, sim['tv_beta'] + 1e-10, color='mediumpurple', lw=2) ax.set_xlabel('observations $n$') -ax.set_ylabel(r'$d_n = \|P(\cdot|\mathscr{F}_n) - Q(\cdot|\mathscr{F}_n)\|_{\mathrm{TV}}$') -ax.text(0.03, 0.93, '(b)', transform=ax.transAxes) +ax.set_ylabel( + r'$d_n = \|P(\cdot|\mathscr{F}_n)' + r' - Q(\cdot|\mathscr{F}_n)\|_{\mathrm{TV}}$' +) +ax.set_title(r'(b) total-variation distance $d_n$') ax.set_ylim(bottom=1e-4) -# --- Panel (c): log likelihood ratio --- ax = axes[1, 0] ax.plot(steps, sim['log_Z'], color='darkorange', lw=2) ax.axhline(0, color='black', lw=0.8, ls=':') ax.set_xlabel('observations $n$') ax.set_ylabel(r'$\log Z_n$') -ax.text(0.03, 0.93, '(c)', transform=ax.transAxes) +ax.set_title(r'(c) log likelihood ratio') -# --- Panel (d): posterior Beta densities at selected epochs --- ax = axes[1, 1] xs = np.linspace(0.01, 0.99, 500) epochs = [0, 20, 100, n_steps] @@ -563,7 +615,7 @@ for epoch, col in zip(epochs, colors): ax.axvline(p_true, color='black', lw=1.0, ls=':', label=f'$p^*={p_true}$') ax.set_xlabel('$p$') ax.set_ylabel('posterior density') -ax.text(0.03, 0.93, '(d)', transform=ax.transAxes) +ax.set_title('(d) posterior densities') from matplotlib.lines import Line2D handles = [ @@ -580,7 +632,7 @@ plt.tight_layout() plt.show() ``` -The four panels tell a coherent story: +The four panels show: - Panel (a): Starting from $\hat{p}_1^0 \approx 0.11$ and $\hat{p}_2^0 \approx 0.89$, both agents' predictive probabilities @@ -594,7 +646,7 @@ The four panels tell a coherent story: distribution centred on the truth. -## Almost-sure convergence across many paths +### Almost-sure convergence across many paths To illustrate the almost-sure character of the theorem, we run many independent replications. @@ -609,9 +661,6 @@ mystnb: The left panel plots the total-variation distance and the right panel plots the likelihood-ratio martingale. 
name: fig-merging-of-opinions-many-paths --- -# ------------------------------------------------------------------------- -# Simulate N_paths independent realisations -# ------------------------------------------------------------------------- N_paths = 80 n_steps = 500 @@ -629,7 +678,6 @@ for i in range(N_paths): tv_all[i] = s['tv_beta'] logZ_all[i] = s['log_Z'] -# --- Panel (a): TV distance paths --- for i in range(N_paths): ax_tv.semilogy(steps, tv_all[i] + 1e-10, color='steelblue', lw=0.8, alpha=0.3) @@ -637,10 +685,8 @@ ax_tv.semilogy(steps, tv_all.mean(axis=0) + 1e-10, color='black', lw=2, label='mean across paths') ax_tv.set_xlabel('observations $n$') ax_tv.set_ylabel(r'$d_n$ (log scale)') -ax_tv.text(0.03, 0.93, '(a)', transform=ax_tv.transAxes) ax_tv.legend() -# --- Panel (b): log Z_n paths --- for i in range(N_paths): ax_log.plot(steps, logZ_all[i], color='firebrick', lw=0.8, alpha=0.3) @@ -649,23 +695,24 @@ ax_log.plot(steps, logZ_all.mean(axis=0), ax_log.axhline(0, color='gray', lw=0.8, ls=':') ax_log.set_xlabel('observations $n$') ax_log.set_ylabel(r'$\log Z_n$') -ax_log.text(0.03, 0.93, '(b)', transform=ax_log.transAxes) ax_log.legend() plt.tight_layout() plt.show() -# Fraction of paths on which d_n < 0.01 at the final step -frac_small = np.mean(tv_all[:, -1] < 0.01) -print(f"Fraction of paths with d_n < 0.01 at n = {n_steps}: {frac_small:.2f}") +# Finite-horizon summary +frac_below = np.mean(tv_all[:, -1] < 0.30) +mean_final = tv_all[:, -1].mean() +print(f"Fraction of paths with d_n < 0.30 at n = {n_steps}: {frac_below:.2f}") +print(f"Mean distance at n = {n_steps}: {mean_final:.3f}") ``` -In this simulation, the distances are small on almost all sampled paths by the final horizon. +At this finite horizon, the distances have moved down substantially from their initial levels, but they are not yet close to zero. -That is consistent with the theorem's almost-sure conclusion. 
+That is still consistent with the theorem, because almost-sure convergence is an asymptotic statement. -## The supermartingale property of $d_n$ +### The supermartingale property of $d_n$ The proof relies on $\{d_n\}$ being a non-negative supermartingale. @@ -680,12 +727,6 @@ mystnb: The plots show average increments of $d_n$ and their cumulative sum across many simulated paths. name: fig-merging-of-opinions-supermartingale --- -# ------------------------------------------------------------------------- -# Illustrate the supermartingale property: -# E_Q[d_{n+1} | F_n] <= d_n -# ------------------------------------------------------------------------- -# Proxy: average d_{n+1} - d_n across many paths should be <= 0. - diffs = np.diff(tv_all, axis=1) # shape (N_paths, n_steps) mean_diffs = diffs.mean(axis=0) # average increment at each step cum_sum = np.cumsum(mean_diffs) # cumulative average change @@ -703,7 +744,6 @@ ax.fill_between(range(200), mean_diffs[:200], 0, color='red', label='positive increments') ax.set_xlabel('observations $n$') ax.set_ylabel(r'$\mathbb{E}[d_{n+1} - d_n]$') -ax.text(0.03, 0.93, '(a)', transform=ax.transAxes) ax.legend(fontsize=8) ax = axes[1] @@ -711,7 +751,6 @@ ax.plot(cum_sum[:200], color='darkorange', lw=2) ax.axhline(0, color='black', lw=0.8, ls='--') ax.set_xlabel('observations $n$') ax.set_ylabel(r'cumulative average change in $d_n$') -ax.text(0.03, 0.93, '(b)', transform=ax.transAxes) plt.tight_layout() plt.show() @@ -762,60 +801,36 @@ mystnb: figure: caption: | Failure of merging under singular priors. - The right panel separates the full future-path distance, which stays at one, from the one-step predictive gap, which stays at $|p_P - p_Q|$. + The full future-path distance stays at one, + while the one-step predictive gap stays + at $|p_P - p_Q|$. 
name: fig-merging-of-opinions-singular-priors --- -# ------------------------------------------------------------------------- -# Failure of merging: mutually singular point-mass priors -# ------------------------------------------------------------------------- -fig, axes = plt.subplots(1, 2, figsize=(11, 4)) - -# True data drawn under Q's model (p_Q = 0.75) -p_P = 0.30 # agent P's fixed belief -p_Q = 0.75 # agent Q's fixed belief ← truth +p_P = 0.30 +p_Q = 0.75 n_steps = 500 -rng = np.random.default_rng(1) -data = rng.binomial(1, p_Q, n_steps) - -# Empirical frequency of successes -emp_freq = np.cumsum(data) / np.arange(1, n_steps + 1) - -# Full future-path TV distance and one-step predictive TV distance tv_singular_full = np.ones(n_steps + 1) tv_singular_1step = np.full(n_steps + 1, np.abs(p_P - p_Q)) -# For comparison: run a Beta-Bernoulli merging experiment with the same truth -sim_abs_cont = run_simulation(p_Q, 1.0, 8.0, 8.0, 1.0, n_steps, seed=1) - -# --- Panel (a): empirical frequency --- -ax = axes[0] -ax.plot(np.arange(1, n_steps + 1), emp_freq, - color='steelblue', lw=2, label='empirical frequency $k_n/n$') -ax.axhline(p_Q, color='firebrick', lw=1.2, ls='--', - label=f'truth $p_Q = {p_Q}$') -ax.axhline(p_P, color='gray', lw=1.2, ls=':', - label=f"Agent P's belief $p_P = {p_P}$") -ax.set_xlabel('observations $n$') -ax.set_ylabel('probability') -ax.text(0.03, 0.93, '(a)', transform=ax.transAxes) -ax.legend(fontsize=8) -ax.set_ylim(0, 1) +sim_abs_cont = run_simulation( + p_Q, 1.0, 8.0, 8.0, 1.0, n_steps, seed=1 +) -# --- Panel (b): TV distance comparison --- -ax = axes[1] +fig, ax = plt.subplots(figsize=(8, 4)) ax.plot(np.arange(n_steps + 1), tv_singular_full, color='firebrick', lw=2, - label=r'singular priors: full-path $d_n = 1$') + label=r'singular: full-path $d_n = 1$') ax.plot(np.arange(n_steps + 1), tv_singular_1step, color='gray', lw=2, ls=':', - label=r'one-step predictive gap $= |p_P - p_Q|$') -ax.plot(np.arange(n_steps + 1), 
sim_abs_cont['tv_beta'], + label=r'one-step gap $= |p_P - p_Q|$') +ax.plot(np.arange(n_steps + 1), + sim_abs_cont['tv_beta'], color='steelblue', lw=2, - label=r'$\mathrm{Beta}(1,8)$ vs $\mathrm{Beta}(8,1)$') + label=(r'$\mathrm{Beta}(1,8)$ vs' + r' $\mathrm{Beta}(8,1)$')) ax.set_xlabel('observations $n$') ax.set_ylabel(r'$d_n$') -ax.text(0.03, 0.93, '(b)', transform=ax.transAxes) ax.legend(fontsize=8) ax.set_ylim(0, 1.05) @@ -835,7 +850,9 @@ More data does not reconcile the agents, because each rules out paths the other ## Kakutani's theorem: when does merging hold? A natural question is: for which product measures does the Blackwell–Dubins -hypothesis $P \ll Q$ hold? For infinite product measures, the answer is +hypothesis $P \ll Q$ hold? + +For infinite product measures, the answer is given by a classical result of {cite:t}`kakutani1948`. ### Hellinger affinities @@ -874,8 +891,12 @@ $$ :label: kakutani_dichotomy Let $P = \bigotimes_{n=1}^\infty P_n$ and $Q = \bigotimes_{n=1}^\infty Q_n$ -be infinite product measures. Then either $P \sim Q$ or $P \perp Q$; there -is no intermediate case. Specifically, +be infinite product measures. + +Then either $P \sim Q$ or $P \perp Q$; there +is no intermediate case. + +Specifically, $$ P \sim Q @@ -892,12 +913,14 @@ A standard proof studies the likelihood-ratio martingale $Z_N = \prod_{n=1}^N (dP_n/dQ_n)$ together with the identity $\mathbb{E}_Q[\sqrt{Z_N}] = \prod_{n=1}^N \rho_n$. -The product staying positive corresponds to equivalence, while the product collapsing to zero corresponds to singularity. $\square$ +The product staying positive corresponds to equivalence, while the product collapsing to zero corresponds to singularity. + +$\square$ ``` ### Implication for merging -For i.i.d.-type sequences, Kakutani's theorem gives the following picture: +For IID-type sequences, Kakutani's theorem gives the following picture: | Scenario | $\sum_n (1-\rho_n)$ | Conclusion | Merging? 
| |---|---|---|---| @@ -905,11 +928,11 @@ For i.i.d.-type sequences, Kakutani's theorem gives the following picture: | $P_n \ne Q_n$ with $\sum_n (1-\rho_n) < \infty$ | Finite | $P \sim Q$ | Yes; Blackwell–Dubins applies | | $P_n = P \ne Q = Q_n$ fixed, $n \ge 1$ | $\infty$ | $P \perp Q$ | No | -The i.i.d. case with different fixed marginals is the standard no-merging example. +The IID case with different fixed marginals is the standard no-merging example. If two agents assign permanently different distributions to each observation, they end up in disjoint probability worlds. -### Python: a Gaussian product-measure example +### A Gaussian product-measure example We illustrate Kakutani's dichotomy with Gaussian product measures. @@ -922,97 +945,101 @@ Three choices of $\mu_n$: 3. $\mu_n = c/n$ ($\sum (1-\rho_n) \approx \sum c^2/(8n^2) < \infty$) $\Rightarrow P \sim Q$. ```{code-cell} ipython3 ---- -mystnb: - figure: - caption: | - Kakutani's dichotomy for Gaussian product measures. - Only the $\mu_n = c/n$ sequence produces a finite Hellinger sum and a nondegenerate limiting likelihood ratio. 
- name: fig-merging-of-opinions-kakutani-gaussian ---- -# ------------------------------------------------------------------------- -# Kakutani's theorem: Gaussian product measures -# ------------------------------------------------------------------------- -def hellinger_affinity_gauss(mu_n): - """Hellinger affinity between N(mu_n, 1) and N(0, 1).""" - return np.exp(-mu_n**2 / 8.0) - - N_max = 2000 ns = np.arange(1, N_max + 1) -c = 2.0 # scale parameter common to all sequences - -sequences = { - r'Constant: $\mu_n = c$': - np.full(N_max, c), - r'$\mu_n = c / \sqrt{n}$': - c / np.sqrt(ns), - r'$\mu_n = c / n$ ($P \sim Q$)': - c / ns, -} +c = 2.0 +N_plot = 400 +rng = np.random.default_rng(0) -fig, axes = plt.subplots(1, 3, figsize=(13, 4)) +cases = [ + (r'$\mu_n = c$ (constant)', np.full(N_max, c)), + (r'$\mu_n = c/\sqrt{n}$', c / np.sqrt(ns)), + (r'$\mu_n = c/n$', c / ns), +] +``` -colors_k = ['firebrick', 'darkorange', 'steelblue'] +With constant drift, $\log Z_N$ drifts to $-\infty$ under $Q$, so $Z_N \to 0$ $Q$-a.s. and $P \perp Q$. -# --- Panel (a): log Hellinger product log prod rho_n = sum log rho_n --- -ax = axes[0] -for (label, mu_seq), col in zip(sequences.items(), colors_k): - rho = hellinger_affinity_gauss(mu_seq) - log_prod = np.cumsum(np.log(rho)) - ax.plot(ns, log_prod, color=col, lw=2, label=label) +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: | + Constant drift $\mu_n = c$: the + likelihood ratio collapses ($P \perp Q$). 
+ name: fig-kakutani-constant +--- +label, μ_seq = cases[0] +x = rng.standard_normal(N_plot) +log_Z_inc = μ_seq[:N_plot] * x - μ_seq[:N_plot]**2 / 2 +log_Z = np.concatenate([[0], np.cumsum(log_Z_inc)]) + +fig, ax = plt.subplots(figsize=(8, 3)) +ax.plot(np.arange(N_plot + 1), log_Z, + color='darkorange', lw=2, label=label) ax.axhline(0, color='black', lw=0.8, ls=':') ax.set_xlabel('horizon $N$') -ax.set_ylabel(r'$\log \prod_{n=1}^{N} \rho_n$') -ax.text(0.03, 0.93, '(a)', transform=ax.transAxes) -ax.legend(fontsize=7.5) - -# --- Panel (b): sum of (1 - rho_n) --- -ax = axes[1] -for (label, mu_seq), col in zip(sequences.items(), colors_k): - rho = hellinger_affinity_gauss(mu_seq) - cum_sum = np.cumsum(1 - rho) - ax.plot(ns, cum_sum, color=col, lw=2, label=label) -ax.set_xlabel('horizon $N$') -ax.set_ylabel(r'$\sum_{n=1}^{N}(1 - \rho_n)$') -ax.text(0.03, 0.93, '(b)', transform=ax.transAxes) -ax.legend(fontsize=7.5) - -# --- Panel (c): simulated log Z_N trajectories --- -rng = np.random.default_rng(0) -N_plot = 400 +ax.set_ylabel(r'$\log Z_N$ under $Q$') +ax.legend(fontsize=8) +plt.tight_layout() +plt.show() +``` -ax = axes[2] -for (label, mu_seq), col in zip(sequences.items(), colors_k): - # generate data from Q = N(0,1) - x = rng.standard_normal(N_plot) - # log Z_N = sum_{n=1}^N [mu_n * x_n - mu_n^2 / 2] - log_Z_increments = mu_seq[:N_plot] * x - mu_seq[:N_plot]**2 / 2.0 - log_Z_path = np.concatenate([[0], np.cumsum(log_Z_increments)]) - ax.plot(np.arange(N_plot + 1), log_Z_path, color=col, lw=2, label=label) +The $\mu_n = c/\sqrt{n}$ case shows the same qualitative picture: despite the drift vanishing, it does so too slowly. +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: | + Drift $\mu_n = c/\sqrt{n}$: still + singular ($P \perp Q$). 
+ name: fig-kakutani-sqrt +--- +label, μ_seq = cases[1] +x = rng.standard_normal(N_plot) +log_Z_inc = μ_seq[:N_plot] * x - μ_seq[:N_plot]**2 / 2 +log_Z = np.concatenate([[0], np.cumsum(log_Z_inc)]) + +fig, ax = plt.subplots(figsize=(8, 3)) +ax.plot(np.arange(N_plot + 1), log_Z, + color='purple', lw=2, label=label) ax.axhline(0, color='black', lw=0.8, ls=':') ax.set_xlabel('horizon $N$') ax.set_ylabel(r'$\log Z_N$ under $Q$') -ax.text(0.03, 0.93, '(c)', transform=ax.transAxes) -ax.legend(fontsize=7.5) - +ax.legend(fontsize=8) plt.tight_layout() plt.show() ``` -The three panels confirm Kakutani's theorem: +Only with $\mu_n = c/n$ does $\sum (1-\rho_n) < \infty$ hold, so the likelihood ratio remains nondegenerate and $P \sim Q$. -- Constant drift (red): $\log \prod \rho_n \to -\infty$ and - $\sum (1-\rho_n) \to \infty$; the likelihood ratio drifts to $-\infty$ - under $Q$, so $Z_N \to 0$ $Q$-a.s. and $P \perp Q$. -- $\mu_n = c/\sqrt{n}$ (orange): the same qualitative picture. - Despite the drift vanishing, it does so too slowly. -- $\mu_n = c/n$ (blue): $\sum (1-\rho_n) < \infty$, the log Hellinger - product stabilises to a finite limit, and the simulated likelihood ratio - remains nondegenerate, which is consistent with $P \sim Q$. +Blackwell–Dubins applies only in this case. -Only in the third case does Blackwell–Dubins apply and merging occur. +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: | + Drift $\mu_n = c/n$: the likelihood + ratio stabilises ($P \sim Q$). 
+ name: fig-kakutani-inv-n +--- +label, μ_seq = cases[2] +x = rng.standard_normal(N_plot) +log_Z_inc = μ_seq[:N_plot] * x - μ_seq[:N_plot]**2 / 2 +log_Z = np.concatenate([[0], np.cumsum(log_Z_inc)]) + +fig, ax = plt.subplots(figsize=(8, 3)) +ax.plot(np.arange(N_plot + 1), log_Z, + color='steelblue', lw=2, label=label) +ax.axhline(0, color='black', lw=0.8, ls=':') +ax.set_xlabel('horizon $N$') +ax.set_ylabel(r'$\log Z_N$ under $Q$') +ax.legend(fontsize=8) +plt.tight_layout() +plt.show() +``` ## Extension to continuous time @@ -1030,6 +1057,7 @@ W_t = \widetilde{W}_t + \int_0^t \theta_s\, ds, $$ where $\widetilde{W}$ is a $P$-Brownian motion. + The Girsanov–Cameron–Martin theorem {cite:p}`girsanov1960` gives the likelihood-ratio process as the stochastic exponential @@ -1038,7 +1066,7 @@ Z_t = \exp\!\left(\int_0^t \theta_s\, dW_s - \frac{1}{2}\int_0^t \theta_s^2\, ds\right). $$ -$Z_t$ is always a non-negative $Q$-local martingale: it is a true martingale +$Z_t$ is always a non-negative $Q$-local martingale; it is a true martingale if and only if $\mathbb{E}_Q[Z_t] = 1$ for all $t$. Novikov's condition {cite:p}`novikov1972`, @@ -1049,17 +1077,22 @@ is sufficient. A key subtlety on $[0,+\infty)$ is that local absolute continuity does *not* imply global absolute continuity on $\mathscr{F}_\infty$. -```{prf:proposition} Dichotomy at Infinity +```{prf:remark} Infinite-Horizon Subtlety :label: dichotomy_at_infinity -Suppose $Z_t$ is a true $Q$-martingale for every finite horizon. Then -$Z_t \to Z_\infty$ $Q$-a.s., and exactly one of the following holds: +Suppose $Z_t$ is a true $Q$-martingale for every finite horizon and let $Z_t \to Z_\infty$ $Q$-a.s. -1. $\{Z_t\}$ is uniformly integrable over $[0,\infty)$: - $P \ll Q$ on $\mathscr{F}_\infty$, with $dP/dQ = Z_\infty > 0$ $P$-a.s. +If $\{Z_t\}$ is uniformly integrable on $[0,\infty)$, then $P \ll Q$ on $\mathscr{F}_\infty$ and -2. $\{Z_t\}$ is *not* uniformly integrable: - $Z_\infty = 0$ $Q$-a.s. 
and $P \perp Q$ on $\mathscr{F}_\infty$. +$$ +\frac{dP}{dQ} = Z_\infty. +$$ + +If uniform integrability fails, then global absolute continuity on $\mathscr{F}_\infty$ can fail. + +In that case one can no longer conclude merging from the discrete-time argument alone. + +In many standard examples, including a non-zero constant drift, the measures are in fact singular on $\mathscr{F}_\infty$. ``` A convenient sufficient condition in deterministic-drift examples is the **energy condition** @@ -1072,10 +1105,14 @@ Informally, this says the total amount of information separating the two measure When $\theta$ is a non-zero constant, the condition fails, the measures are singular on $\mathscr{F}_\infty$, and merging does not occur. -With $P \ll Q$ on $\mathscr{F}_\infty$ established, the proof of the -continuous-time Blackwell–Dubins theorem is identical to the discrete-time -proof: $\{d_t, \mathscr{F}_t\}$ is a non-negative $Q$-supermartingale in -$[0,1]$, so $d_t \to d_\infty$ $Q$-a.s.; the $L^1$ bound +Whenever $P \ll Q$ on $\mathscr{F}_\infty$ is established, the proof of the +continuous-time Blackwell–Dubins result is identical to the discrete-time +proof. + +$\{d_t, \mathscr{F}_t\}$ is a non-negative $Q$-supermartingale in +$[0,1]$, so $d_t \to d_\infty$ $Q$-a.s. + +The $L^1$ bound $\mathbb{E}_Q[d_t] \leq \tfrac{1}{2}\mathbb{E}_Q[|Z_t - Z_\infty|] \to 0$ forces $d_\infty = 0$. @@ -1101,7 +1138,7 @@ This is a strong form of Bayesian consistency. In many dominated parametric models, absolute continuity follows from the prior assigning positive mass to a suitable neighbourhood of the true parameter. -{cite:t}`diaconis1986` show that this absolute-continuity condition is not just sufficient but essentially *necessary* for Doob consistency. +{cite:t}`diaconis1986` show that this absolute-continuity condition is not just sufficient but essentially *necessary* for Doob consistency. 
When $P \perp Q^*$, there are events of probability one under $Q^*$ that have probability zero under $P$, so the agent's beliefs remain fundamentally misspecified. @@ -1116,14 +1153,13 @@ If two agents start with equivalent priors and observe the same history, their c {cite:t}`aumann1976`'s agreement theorem strengthens this: agents with a common prior cannot "agree to disagree" on posterior probabilities. - Blackwell–Dubins complements Aumann by showing that equivalent priors are enough for eventual agreement. ### Ergodic Markov chains For a Markov chain with transition kernel $\Pi$ and two initial distributions $\mu$ and $\nu$, the $n$-step distributions are $\mu\Pi^n$ -and $\nu\Pi^n$. +and $\nu\Pi^n$. If $\Pi$ is ergodic with unique stationary distribution $\pi$, both converge to $\pi$, so @@ -1166,9 +1202,6 @@ mystnb: The fitted slope is close to $-1/2$, which is consistent with square-root decay in this experiment. name: fig-merging-of-opinions-rate --- -# ------------------------------------------------------------------------- -# Rate of merging: compare d_n to n^{-1/2} -# ------------------------------------------------------------------------- N_paths_rate = 200 n_steps_rate = 800 @@ -1180,9 +1213,10 @@ for i in range(N_paths_rate): ns_rate = np.arange(1, n_steps_rate + 1) mean_tv = tv_rate[:, 1:].mean(axis=0) # mean d_n, n = 1, ..., n_steps_rate -# Fit a reference line d_n ~ C / sqrt(n) -log_ns = np.log(ns_rate[10:]) -log_tv = np.log(mean_tv[10:] + 1e-12) +# Fit a reference line d_n ~ C / sqrt(n) using the later part of the sample +fit_start = 200 +log_ns = np.log(ns_rate[fit_start:]) +log_tv = np.log(mean_tv[fit_start:] + 1e-12) coeffs = np.polyfit(log_ns, log_tv, 1) slope = coeffs[0] @@ -1194,7 +1228,8 @@ fig, ax = plt.subplots(figsize=(8, 4)) ax.loglog(ns_rate, mean_tv, color='steelblue', lw=2, label=r'$\mathbb{E}_Q[d_n]$ (Monte Carlo)') ax.loglog(ns_rate, ref_curve, color='firebrick', lw=2, ls='--', - label=rf'Reference $C/\sqrt{{n}}$ (fitted slope 
$\approx {slope:.2f}$)') + label=(rf'Reference $C/\sqrt{{n}}$' + rf' (fitted slope $\approx {slope:.2f}$)')) ax.set_xlabel('sample size $n$') ax.set_ylabel(r'$\mathbb{E}_Q[d_n]$') ax.legend() @@ -1204,12 +1239,12 @@ plt.show() print(f"Fitted log-log slope: {slope:.3f} (predicted: -0.50)") ``` -The fitted slope is close to $-0.5$. +Fitting the later part of the sample gives a slope close to $-0.5$. That is consistent with $n^{-1/2}$ scaling in this simulation. -## Summary +## Summary and extensions The logical flow underlying the Blackwell–Dubins theorem is: @@ -1225,28 +1260,19 @@ Z_\infty d_n \xrightarrow{Q\text{-a.s.}} 0. $$ -Key takeaways: +Takeaways: -1. One-sided absolute continuity is enough for the theorem. - If $P \ll Q$, merging holds $Q$-almost surely. - If $P \sim Q$, then the conclusion is symmetric. +1. One-sided absolute continuity $P \ll Q$ suffices for merging under both measures. -2. The likelihood-ratio martingale is the central object. - Its $L^1$ convergence (guaranteed by uniform integrability under $P \ll Q$) - drives the almost-sure convergence of the total-variation distance $d_n$. +2. The likelihood-ratio martingale $Z_n$ and its $L^1$ convergence drive the result. -3. The supermartingale structure of $d_n$ provides the almost-sure - convergence: more data can only reduce (in expectation) the difficulty - of telling two hypotheses apart. +3. More data can only reduce (in expectation) the difficulty of distinguishing two hypotheses. -4. Kakutani's theorem tells us when $P \ll Q$ holds for product - measures: precisely when the Hellinger affinities satisfy - $\sum_n (1 - \rho_n) < \infty$. +4. Kakutani's theorem characterises when $P \ll Q$ holds for product measures: precisely when $\sum_n (1 - \rho_n) < \infty$. -5. For the product-measure settings covered by Kakutani, there is a sharp dichotomy: - either $P \sim Q$ and merging occurs, or $P \perp Q$ and disagreement can persist forever. +5. 
For product measures, either $P \sim Q$ and merging occurs, or $P \perp Q$ and disagreement persists forever. -## Applications in economics +### Applications in economics Some influential applications and extensions are: @@ -1254,7 +1280,7 @@ Some influential applications and extensions are: - {cite}`KalaiLehrer1993Subjective`: subjective and objective equilibria coincide asymptotically under the same condition. - {cite}`KalaiLehrer1994Merging`: weak and strong notions of merging are introduced for environments where full total-variation convergence is too strong. - {cite}`KalaiLehrerSmorodinsky1999`: merging is linked to calibrated forecasting. -- {cite}`JacksonKalaiSmorodinsky1999`: de~Finetti-style representations are connected to Bayesian learning and posterior convergence. +- {cite}`JacksonKalaiSmorodinsky1999`: de Finetti-style representations are connected to Bayesian learning and posterior convergence. - {cite}`JacksonKalai1999`: social learning erodes reputational effects that rely on persistent disagreement across cohorts. - {cite}`Sandroni1998Nash`: near-absolute-continuity conditions are shown to suffice for Nash-type convergence in repeated games. - {cite}`Sandroni2000`: gives an alternative proof and an economic interpretation of persistent disagreement in terms of mutually favorable bets. @@ -1264,12 +1290,8 @@ Some influential applications and extensions are: - {cite}`PomattoAlNajjarSandroni2014`: extends the theorem to finitely additive probabilities and connects merging to test manipulability. - {cite}`AcemogluChernozhukovYildiz2016`: shows how disagreement can persist when agents are uncertain about the signal structure itself. -## A key companion paper from probability +### A companion result from probability -{cite}`DiaconisFreedman1986` establish consistency of Bayes estimates under -misspecification, a result in the same intellectual tradition as -Blackwell--Dubins. 
+{cite}`DiaconisFreedman1986` establish consistency of Bayes estimates under misspecification, a result in the same intellectual tradition as Blackwell--Dubins. -It is routinely co-cited with the merging theorem in the -economics learning literature as providing the probabilistic underpinning for -Bayesian consistency. +It is routinely co-cited with the merging theorem in the economics learning literature as providing the probabilistic underpinning for Bayesian consistency. From 1eca46089b032c378062bfe9bd69134314fbbb86 Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Thu, 2 Apr 2026 14:19:30 +1100 Subject: [PATCH 09/20] update blackwell --- lectures/blackwell_kihlstrom.md | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/lectures/blackwell_kihlstrom.md b/lectures/blackwell_kihlstrom.md index 017a51d4c..96bb78891 100644 --- a/lectures/blackwell_kihlstrom.md +++ b/lectures/blackwell_kihlstrom.md @@ -1184,7 +1184,7 @@ The lecture {doc}`likelihood_bayes` studies Bayesian learning in a setting that This section transports concepts back and forth between the two lectures. -### The state space is the same +### Setup: states, experiments, and IID draws In {doc}`likelihood_bayes` the unknown "state of the world" is which density nature chose permanently: nature drew the data either from $f$ or from $g$, but not which one is known to the observer. @@ -1196,7 +1196,7 @@ $$ The Bayesian prior $\pi_0 \in [0,1]$ on $s_1 = f$ plays exactly the role of the prior $p \in P$ on the probability simplex in the present lecture. -### A single draw is an experiment +A single draw is an experiment. A single observation $w_t$ constitutes a Blackwell experiment with signal space $X$ and Markov kernel @@ -1209,7 +1209,7 @@ $\mu(\cdot \mid s_1) = f(\cdot)$ and $\mu(\cdot \mid s_2) = g(\cdot)$. 
This is the continuous-signal analogue of the $N \times M$ Markov matrix studied above (with $N = 2$ states and a continuum of signals instead of $M$ discrete ones). -### $t$ IID draws form a richer experiment +$t$ IID draws form a richer experiment. Observing the history $w^t = (w_1, \ldots, w_t)$ is a strictly more informative Blackwell experiment than observing any sub-history $w^s$ for $s < t$, because the conditional joint densities for $w^t$ are @@ -1225,7 +1225,7 @@ The reverse is impossible — you cannot reconstruct information from fewer draw This is why more data is always weakly better for every expected-utility maximiser (the economic criterion of Blackwell's theorem). -### The likelihood ratio process is the sufficient statistic of the experiment +### Sufficient statistics and posteriors The key formula in {doc}`likelihood_bayes` is @@ -1239,9 +1239,10 @@ Because $\pi_{t+1}$ depends on $w^t$ **only through** $L(w^t)$, the likelihood r In Blackwell's language, the experiment "report $L(w^t)$" is informationally equivalent to "report $w^t$": passing $w^t$ through the deterministic map $w^t \mapsto L(w^t)$ is a (degenerate) stochastic transformation that discards nothing relevant to discriminating $f$ from $g$. -### The posterior lives on the 1-simplex and is Kihlstrom's standard experiment +The posterior lives on the 1-simplex and is Kihlstrom's standard experiment. With $N = 2$ states the probability simplex $P$ collapses to the unit interval $[0,1]$. + Kihlstrom's standard experiment records only the posterior $$ @@ -1252,7 +1253,7 @@ which is the sufficient statistic that the Bayesian tracks throughout. The **distribution** of $\pi_t$ over all possible histories $w^t$ is Kihlstrom's $\hat{\mu}^c$ — the distribution of posteriors induced by the experiment $\mu_t$ starting from prior $\pi_0 = c$. 
-### The martingale property is mean preservation +### Why more data always helps {doc}`likelihood_bayes` proves that $\{\pi_t\}$ is a **martingale**: @@ -1266,7 +1267,7 @@ This is exactly the **mean-preservation** condition that sits at the centre of K Mean preservation is not a special feature of this two-state example; it is an exact consequence of Bayes' law for **any** experiment. -### Blackwell's theorem explains why more data always helps +Blackwell's theorem explains why more data always helps. Kihlstrom's reformulation states: @@ -1276,7 +1277,7 @@ In the {doc}`likelihood_bayes` setting this means the distribution of $\pi_t$ is The almost-sure convergence $\pi_t \to 0$ or $1$ is the limit of this spreading process — perfect information resolves all uncertainty, collapsing the distribution to a degenerate point mass at a vertex of the simplex. -### DeGroot uncertainty functions and mutual information +DeGroot uncertainty functions connect to mutual information. The Shannon entropy of the two-state posterior is @@ -1296,7 +1297,7 @@ Because $\mu_t$ Blackwell-dominates $\mu_s$ for $t > s$, Blackwell's theorem gua ### Summary table -The table below collects the complete translation between concepts in the two lectures. +The following table collects the translation between concepts in the two lectures. 
| Concept in {doc}`likelihood_bayes` | Concept in this lecture | |---|---| From ce859f5181f009d84d00bbe5b6986f0977c313ea Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Thu, 2 Apr 2026 18:48:53 +1100 Subject: [PATCH 10/20] updates --- lectures/_static/quant-econ.bib | 2 +- lectures/blackwell_kihlstrom.md | 14 +- lectures/survival_recursive_preferences.md | 1582 ++++++++++---------- 3 files changed, 835 insertions(+), 763 deletions(-) diff --git a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index fbae27a9d..3c469fff9 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -9,7 +9,7 @@ @article{Borovicka2020 publisher = {University of Chicago Press} } -@article{Sandroni2000, +@article{Sandroni2000Markets, author = {Sandroni, Alvaro}, title = {Do Markets Favor Agents Able to Make Accurate Predictions?}, journal = {Econometrica}, diff --git a/lectures/blackwell_kihlstrom.md b/lectures/blackwell_kihlstrom.md index 96bb78891..49951f1d3 100644 --- a/lectures/blackwell_kihlstrom.md +++ b/lectures/blackwell_kihlstrom.md @@ -96,7 +96,7 @@ An **experiment** is described by the conditional distribution of an observed si $\tilde{x}$ given the state $\tilde{s}$. When the signal space is also finite, say $X = \{x_1, \ldots, x_M\}$, an experiment -reduces to an $N \times M$ **Markov matrix** +reduces to an $N \times M$ *Markov matrix* $$ \mu = [\mu_{ij}], \qquad @@ -131,7 +131,7 @@ $M \times K$ Markov matrix: $q_{lk} \geq 0$ and $\sum_k q_{lk} = 1$ for every ro ```{admonition} Definition (Sufficiency) :class: tip -Experiment $\mu$ is **sufficient for** $\nu$ if there exists a stochastic +Experiment $\mu$ is *sufficient for* $\nu$ if there exists a stochastic transformation $Q$ (an $M \times K$ Markov matrix) such that $$ @@ -219,7 +219,7 @@ $$ for the probability simplex. 
-For fixed $A$ and $u$, the set of **achievable expected-utility vectors** under experiment $\mu$ is +For fixed $A$ and $u$, the set of *achievable expected-utility vectors* under experiment $\mu$ is $$ B(\mu, A, u) = \Bigl\{v \in \mathbb{R}^N : @@ -651,7 +651,7 @@ In DeGroot's axiomatization, an uncertainty function is: - *Symmetric*: it depends on the components of $p$, not their labeling. - *Normalized*: it is maximized at $p = (1/N, \ldots, 1/N)$ and minimized at vertices. -The **value of experiment $\mu$ given prior $p$** is +The *value of experiment $\mu$ given prior $p$* is $$ I(\tilde{x}_\mu;\, \tilde{s};\, U) @@ -1251,7 +1251,7 @@ $$ which is the sufficient statistic that the Bayesian tracks throughout. -The **distribution** of $\pi_t$ over all possible histories $w^t$ is Kihlstrom's $\hat{\mu}^c$ — the distribution of posteriors induced by the experiment $\mu_t$ starting from prior $\pi_0 = c$. +The *distribution* of $\pi_t$ over all possible histories $w^t$ is Kihlstrom's $\hat{\mu}^c$ — the distribution of posteriors induced by the experiment $\mu_t$ starting from prior $\pi_0 = c$. ### Why more data always helps @@ -1265,7 +1265,7 @@ and in particular $E[\pi_t] = \pi_0$ for all $t$. This is exactly the **mean-preservation** condition that sits at the centre of Kihlstrom's reformulation: the distribution of posteriors $\hat{\mu}^c$ must satisfy $\int_P p \, \hat{\mu}^c(dp) = c$. -Mean preservation is not a special feature of this two-state example; it is an exact consequence of Bayes' law for **any** experiment. +Mean preservation is not a special feature of this two-state example; it is an exact consequence of Bayes' law for *any* experiment. Blackwell's theorem explains why more data always helps. @@ -1293,7 +1293,7 @@ $$ which equals the **mutual information** between the history $w^t$ and the unknown state. 
-Because $\mu_t$ Blackwell-dominates $\mu_s$ for $t > s$, Blackwell's theorem guarantees $I(\mu_t; U) \geq I(\mu_s; U)$ for **every** concave uncertainty function $U$ — more draws reduce expected uncertainty under every such measure, not just Shannon entropy. +Because $\mu_t$ Blackwell-dominates $\mu_s$ for $t > s$, Blackwell's theorem guarantees $I(\mu_t; U) \geq I(\mu_s; U)$ for *every* concave uncertainty function $U$ — more draws reduce expected uncertainty under every such measure, not just Shannon entropy. ### Summary table diff --git a/lectures/survival_recursive_preferences.md b/lectures/survival_recursive_preferences.md index 5a18afb4e..5c1ce6b32 100644 --- a/lectures/survival_recursive_preferences.md +++ b/lectures/survival_recursive_preferences.md @@ -11,157 +11,193 @@ kernelspec: name: python3 --- +(survival_recursive_preferences)= +```{raw} jupyter + +``` + # Survival and Long-Run Dynamics under Recursive Preferences ```{index} single: Survival; Recursive Preferences ``` +```{contents} Contents +:depth: 2 +``` + ## Overview -This lecture describes a theory of **long-run survival** of agents with heterogeneous beliefs -developed by {cite}`Borovicka2020`. +This lecture studies the theory of long-run survival in {cite:t}`Borovicka2020`. + +The classical **market selection hypothesis** says that agents with less accurate beliefs are driven out of the market in the long run. -The classical **market selection hypothesis** asserts that agents with incorrect beliefs -will be driven from the market in the long run --- they will lose all of their wealth -to agents with more accurate beliefs. +This result was established rigorously by {cite:t}`Sandroni2000Markets` and {cite:t}`Blume_Easley2006` for economies with separable CRRA preferences. -This result was established rigorously by {cite}`Sandroni2000` and {cite}`Blume_Easley2006` -for economies in which agents have **separable** (CRRA) preferences. 
+Borovicka shows that the conclusion can fail under Epstein-Zin recursive preferences. -{cite}`Borovicka2020` shows that when agents have **recursive preferences** -of the {cite}`Epstein_Zin1989` type, the market selection hypothesis can fail: -agents with incorrect beliefs can survive and even prosper in the long run. +With recursive preferences, agents with distorted beliefs can survive and can even dominate. -The key insight is that recursive preferences **disentangle** risk aversion from the -intertemporal elasticity of substitution (IES), and this separation opens new channels -through which agents with incorrect beliefs can accumulate wealth. +The key mechanism is that recursive preferences separate risk aversion from the intertemporal elasticity of substitution. -Three survival channels emerge: +That separation creates three channels that matter for survival: -1. **Risk premium channel**: a more optimistic agent earns a higher expected logarithmic - return on her portfolio by holding a larger share of risky assets -2. **Speculative volatility channel**: speculative portfolio positions generate volatile - returns that penalize survival through a Jensen's inequality effect -3. **Saving channel**: under high IES, an agent who believes her portfolio has a high - expected return responds by saving more, which can help her outsave extinction +1. The *risk premium channel* rewards the more optimistic agent for holding more of the risky asset. +1. The *speculative volatility channel* penalizes aggressive positions through log-return volatility. +1. The *saving channel* changes consumption and saving decisions when the IES differs from one. -Under separable CRRA preferences, only the first two channels operate, and they ensure -that the agent with more accurate beliefs always dominates. -With recursive preferences, the saving channel can tip the balance in favor of an agent -whose beliefs are less accurate. 
+Under separable preferences, only the first two channels remain. + +Under recursive preferences, the saving channel can overturn market selection. ```{note} -The paper builds on the continuous-time recursive utility formulation of {cite}`Duffie_Epstein1992a`, -using the planner's problem approach of {cite}`Dumas_Uppal_Wang2000`. +The paper builds on the continuous-time recursive utility formulation of {cite:t}`Duffie_Epstein1992a`, +using the planner's problem approach of {cite:t}`Dumas_Uppal_Wang2000`. + Important foundations for the market selection hypothesis were laid by -{cite}`DeLong_etal1991` and {cite}`Blume_Easley1992`. +{cite:t}`DeLong_etal1991` and {cite:t}`Blume_Easley1992`. ``` -Let's start with some imports: +We start with some imports. ```{code-cell} ipython3 import numpy as np import matplotlib.pyplot as plt -from scipy.integrate import solve_bvp -from scipy.optimize import brentq ``` ## Environment -The economy is populated by two types of infinitely lived agents, $n \in \{1, 2\}$, -who have identical recursive preferences but **differ in their beliefs** about the -distribution of future aggregate endowment. +The economy contains two infinitely lived agents, indexed by $n \in \{1, 2\}$. + +The agents have identical recursive preferences but different beliefs about aggregate endowment growth. + +We write Borovička's belief distortions $u^n$ as $\omega^n$. ### Aggregate endowment -Aggregate endowment $Y$ follows a geometric Brownian motion under the true probability -measure $P$: +Under the true probability measure $P$, aggregate endowment satisfies $$ d \log Y_t = \mu_Y dt + \sigma_Y dW_t, \quad Y_0 > 0 $$ (eq:endowment) -where $W$ is a standard Brownian motion, $\mu_Y$ is the drift, and $\sigma_Y > 0$ is the -volatility. +where $W$ is a standard Brownian motion, $\mu_Y$ is the drift, and $\sigma_Y > 0$ is the volatility. 
### Heterogeneous beliefs -Agent $n$ perceives the drift of aggregate endowment to be $\mu_Y + \omega^n \sigma_Y$ instead -of $\mu_Y$. +Agent $n$ believes that the drift is $\mu_Y + \omega^n \sigma_Y$ instead of $\mu_Y$. -The parameter $\omega^n$ captures the degree of **optimism** ($\omega^n > 0$) or **pessimism** -($\omega^n < 0$) of agent $n$. +The parameter $\omega^n$ measures optimism when $\omega^n > 0$ and pessimism when $\omega^n < 0$. -Formally, agent $n$'s subjective probability measure $Q^n$ is defined by the -Radon-Nikodým derivative +Agent $n$'s subjective probability measure $Q^n$ is defined by the Radon–Nikodym derivative $$ M_t^n = \frac{dQ^n}{dP}\bigg|_t = \exp\left(-\frac{1}{2} |\omega^n|^2 t + \omega^n W_t\right) $$ (eq:radon_nikodym) -Under her own measure $Q^n$, agent $n$ believes that $W_t^n = W_t - \omega^n t$ is a -Brownian motion, so that +Under $Q^n$, the process $W_t^n = W_t - \omega^n t$ is a Brownian motion, and agent $n$ perceives $$ -d \log Y_t = (\mu_Y + \omega^n \sigma_Y) dt + \sigma_Y dW_t^n +d \log Y_t = (\mu_Y + \omega^n \sigma_Y) dt + \sigma_Y dW_t^n . $$ -Agent $n$ with $\omega^n > 0$ is **optimistic** about the growth rate of aggregate endowment; -agent $n$ with $\omega^n < 0$ is **pessimistic**. +An agent with $\omega^n > 0$ is optimistic about endowment growth, while an agent with $\omega^n < 0$ is pessimistic. ### Recursive preferences -Both agents have Duffie-Epstein-Zin recursive preferences characterized by three -parameters: +Both agents have Epstein-Zin recursive preferences. -* $\gamma > 0$: coefficient of relative risk aversion (CRRA) -* $\rho^{-1} > 0$: intertemporal elasticity of substitution (IES) -* $\beta > 0$: time-preference rate +We use $\gamma > 0$ for relative risk aversion, $\rho > 0$ for the inverse of the IES, and $\beta > 0$ for the time-preference rate. 
-The felicity function for these preferences is +The Duffie-Epstein-Zin felicity function is $$ -F(C, \nu) = \beta \frac{C^{1-\gamma}}{1-\gamma} \cdot \frac{(1-\gamma) - (1-\rho)\nu / \beta}{\rho - \gamma} +F(C, \nu) += \beta \frac{C^{1-\gamma}}{1-\gamma} +\cdot +\frac{(1-\gamma) - (1-\rho)\nu / \beta}{\rho - \gamma} $$ (eq:felicity) where $\nu$ is the endogenous discount rate. ```{note} +In discrete time, Epstein-Zin preferences aggregate current consumption with a certainty equivalent of future utility via a CES aggregator (see {doc}`doubts_or_variability`). + +In continuous time there is no "next-period $V_{t+1}$," so {cite:t}`Duffie_Epstein1992a` recast the recursion as a felicity function $F(C,\nu)$ that depends on the agent's own continuation-value rate $\nu$. + +The two formulations encode the same separation of risk aversion $\gamma$ from the inverse IES $\rho$. + When $\gamma = \rho$, preferences reduce to the standard separable CRRA case. -The disentanglement of risk aversion $\gamma$ from the inverse IES $\rho$ is the key -feature that drives the new survival results. ``` -## Planner's Problem +## Planner's problem -Following {cite}`Dumas_Uppal_Wang2000`, we study optimal allocations using a social -planner who maximizes a weighted average of the two agents' continuation values. +Following {cite}`Dumas_Uppal_Wang2000`, we study equilibrium allocations through a social planner's problem. -The planner assigns consumption shares $z^1$ and $z^2 = 1 - z^1$ to the two agents -and chooses discount rate processes $\nu^n$ for each agent. +The planner chooses consumption shares $z^1$ and $z^2 = 1 - z^1$ and discount-rate processes $\nu^n$ for the two agents. ### Modified discount factors -It is convenient to incorporate the belief distortions into modified discount factor -processes $\tilde{\lambda}^n = \lambda^n M^n$, where $\lambda^n$ is the standard discount factor. 
+It is convenient to absorb belief distortions into the modified discount factors $\tilde{\lambda}^n = \lambda^n M^n$, where $M^n$ is the Radon-Nikodym derivative {eq}`eq:radon_nikodym`. -The modified discount factor evolves as +These processes satisfy $$ -d \log \tilde{\lambda}_t^n = -\left(\nu_t^n + \frac{1}{2}(\omega^n)^2\right) dt + \omega^n dW_t +d \log \tilde{\lambda}_t^n += -\left(\nu_t^n + \frac{1}{2} (\omega^n)^2\right) dt + \omega^n dW_t . $$ (eq:modified_discount) +```{exercise} +:label: ex_modified_discount + +Derive {eq}`eq:modified_discount`. + +*Hint:* Use $\log \tilde{\lambda}^n = \log \lambda^n + \log M^n$. The Pareto weight $\lambda^n$ evolves as $d\log \lambda_t^n = -\nu_t^n \, dt$, and $\log M_t^n$ is given by {eq}`eq:radon_nikodym`. +``` + +```{solution-start} ex_modified_discount +:class: dropdown +``` + +From the definition $\tilde{\lambda}^n = \lambda^n M^n$, we have + +$$ +\log \tilde{\lambda}_t^n = \log \lambda_t^n + \log M_t^n. +$$ + +The Pareto weight satisfies $d\log \lambda_t^n = -\nu_t^n \, dt$. + +From {eq}`eq:radon_nikodym`, $\log M_t^n = -\frac{1}{2}|\omega^n|^2 t + \omega^n W_t$, so + +$$ +d \log M_t^n = -\tfrac{1}{2}(\omega^n)^2 \, dt + \omega^n \, dW_t. +$$ + +Adding the two: + +$$ +d \log \tilde{\lambda}_t^n = -\nu_t^n \, dt - \tfrac{1}{2}(\omega^n)^2 \, dt + \omega^n \, dW_t = -\left(\nu_t^n + \tfrac{1}{2}(\omega^n)^2\right) dt + \omega^n \, dW_t. +$$ + +```{solution-end} +``` + ### State variable: Pareto share -The key state variable is the **Pareto share** of agent 1: +The key state variable is the Pareto share of agent 1: $$ \upsilon = \frac{\tilde{\lambda}^1}{\tilde{\lambda}^1 + \tilde{\lambda}^2} \in (0, 1) $$ (eq:pareto_share) -This single scalar captures the relative weight of agent 1 in the planner's allocation. +It captures the relative weight of agent 1 in the planner's allocation. + +Define the log-odds ratio $\vartheta = \log(\upsilon / (1 - \upsilon))$. 
-The dynamics of the log-odds ratio $\vartheta = \log(\upsilon / (1-\upsilon))$ are +Its dynamics are $$ d\vartheta_t = \underbrace{\left[\nu_t^2 + \frac{1}{2}(\omega^2)^2 - \nu_t^1 - \frac{1}{2}(\omega^1)^2\right]}_{m_{\vartheta}(\upsilon_t)} dt + (\omega^1 - \omega^2) dW_t @@ -169,27 +205,91 @@ $$ (eq:log_odds) The drift $m_\vartheta(\upsilon)$ determines the long-run behavior of the Pareto share. +```{exercise} +:label: ex_log_odds + +Derive {eq}`eq:log_odds` from {eq}`eq:modified_discount` and the definition $\vartheta = \log(\upsilon/(1-\upsilon))$. + +*Hint:* First show that $\vartheta = \log \tilde{\lambda}^1 - \log \tilde{\lambda}^2$, then subtract the two SDEs. +``` + +```{solution-start} ex_log_odds +:class: dropdown +``` + +Since $\upsilon = \tilde{\lambda}^1 / (\tilde{\lambda}^1 + \tilde{\lambda}^2)$, we have $1 - \upsilon = \tilde{\lambda}^2 / (\tilde{\lambda}^1 + \tilde{\lambda}^2)$, so + +$$ +\vartheta = \log\frac{\upsilon}{1-\upsilon} = \log \tilde{\lambda}^1 - \log \tilde{\lambda}^2. +$$ + +From {eq}`eq:modified_discount`, the two log-discount-factor SDEs are + +$$ +d\log \tilde{\lambda}^1_t = -\left(\nu_t^1 + \tfrac{1}{2}(\omega^1)^2\right)dt + \omega^1 dW_t, +$$ + +$$ +d\log \tilde{\lambda}^2_t = -\left(\nu_t^2 + \tfrac{1}{2}(\omega^2)^2\right)dt + \omega^2 dW_t. +$$ + +Subtracting the second from the first: + +$$ +d\vartheta_t = \left[\nu_t^2 + \tfrac{1}{2}(\omega^2)^2 - \nu_t^1 - \tfrac{1}{2}(\omega^1)^2\right]dt + (\omega^1 - \omega^2)dW_t. +$$ + +```{solution-end} +``` + ### HJB equation -The planner's value function takes the form -$J(\tilde{\lambda}_t, Y_t) = (\tilde{\lambda}_t^1 + \tilde{\lambda}_t^2) Y_t^{1-\gamma} \tilde{J}(\upsilon_t)$, -where $\tilde{J}(\upsilon)$ solves a nonlinear ODE: +Homotheticity reduces the planner's problem to a nonlinear ODE in the single state variable $\upsilon$. 
+ +Because each agent's utility is homogeneous of degree $1-\gamma$ in consumption, the planner's value function factors as $J(\upsilon, Y) = \tilde{J}(\upsilon) \cdot Y^{1-\gamma}/(1-\gamma)$, eliminating $Y$ as a state variable. + +#### From discrete to continuous time + +In discrete time, a planner maximizes a weighted sum of agents' utilities by choosing allocations at each date. + +The Bellman equation is + +$$ +\tilde{J}(\upsilon) = \max_{z^1, z^2} \left\{ \upsilon \, u(z^1) + (1-\upsilon) \, u(z^2) + \beta \, \mathbb{E}\left[\tilde{J}(\upsilon')\right] \right\}. +$$ + +In continuous time, the period length shrinks to $dt$. + +The "flow payoff" over $[t, t+dt)$ becomes $\left[\upsilon F(z^1, \nu^1) + (1-\upsilon)F(z^2, \nu^2)\right] dt$, where $F$ is the Duffie-Epstein-Zin felicity {eq}`eq:felicity`. + +The expected change in the value function over $dt$ is captured by the **infinitesimal generator** $\mathcal{L}$. + +For a diffusion $d\upsilon = m \, dt + s \, dW$, Itô's lemma gives + +$$ +\mathcal{L}\tilde{J}(\upsilon) = m(\upsilon)\,\tilde{J}'(\upsilon) + \tfrac{1}{2} s(\upsilon)^2 \, \tilde{J}''(\upsilon), +$$ + +where $m$ and $s$ are the drift and diffusion of the Pareto share. + +This is the continuous-time analogue of $\beta \, \mathbb{E}[\tilde{J}(\upsilon')] - \tilde{J}(\upsilon)$: it measures how the value function drifts and fluctuates as $\upsilon$ evolves. + +Setting flow payoff plus expected capital gain equal to zero gives the HJB equation: $$ 0 = \sup_{(z^1,z^2,\nu^1,\nu^2)} \left\{ \upsilon F(z^1, \nu^1) + (1-\upsilon) F(z^2, \nu^2) + \mathcal{L} \tilde{J}(\upsilon) \right\} $$ (eq:hjb) -subject to $z^1 + z^2 \leq 1$, where $\mathcal{L}$ is a second-order differential operator -that captures the drift and diffusion of the state variables. +subject to $z^1 + z^2 \leq 1$. + +This is the continuous-time counterpart of the discrete-time planner's problem in {cite:t}`Blume_Easley2006` (see also {doc}`likelihood_ratio_process_2`). 
-The boundary conditions are $\tilde{J}(0) = V^2$ and $\tilde{J}(1) = V^1$, where $V^n$ is the -value in a homogeneous economy populated only by agent $n$. +The boundary conditions match the continuation values of the corresponding homogeneous economies. -## Survival Conditions +## Survival conditions -The central result of the paper characterizes survival in terms of the boundary behavior -of the drift $m_\vartheta(\upsilon)$. +The central result characterizes survival by the boundary behavior of $m_\vartheta(\upsilon)$. ```{prf:proposition} :label: survival_conditions @@ -209,138 +309,290 @@ $$ Then: -**(a)** If (i) and (ii) hold, both agents survive under $P$. +*(a)* If (i) and (ii) hold, both agents survive under $P$. -**(b)** If (i) and (ii') hold, agent 1 dominates in the long run under $P$. +*(b)* If (i) and (ii') hold, agent 1 dominates in the long run under $P$. -**(c)** If (i') and (ii) hold, agent 2 dominates in the long run under $P$. +*(c)* If (i') and (ii) hold, agent 2 dominates in the long run under $P$. -**(d)** If (i') and (ii') hold, each agent dominates with strictly positive probability. +*(d)* If (i') and (ii') hold, each agent dominates with strictly positive probability. ``` -The proof uses the Feller classification of boundary behavior for diffusion processes, -as discussed in {cite}`Karlin_Taylor1981`. +The proof uses the Feller classification of boundary behavior for diffusions, as in {cite:t}`Karlin_Taylor1981`. + +Condition (i) says that when agent 1 is close to extinction, there is a force pushing her share back up. + +Condition (ii) says that when agent 1 is close to absorbing the whole economy, there is a force pushing her share back down. -The intuition is straightforward: condition (i) says that when agent 1's share is -nearly zero, there is a force pushing it back up; condition (ii) says that when agent 1's -share is nearly one, there is a force pushing it back down. 
When both forces are present, the Pareto share is recurrent and both agents survive. -## Wealth Dynamics Decomposition +## Wealth dynamics decomposition -The survival conditions can be expressed in terms of equilibrium wealth dynamics. -When agent 1 becomes negligible ($\upsilon \searrow 0$), equilibrium prices converge to -those in a homogeneous economy populated by agent 2. +We now rewrite the survival conditions from {prf:ref}`survival_conditions` in terms of equilibrium wealth dynamics. -The difference in logarithmic wealth growth rates decomposes as +Agent 1 survives near extinction if and only if her wealth grows faster than agent 2's when she is negligibly small. + +When $\upsilon \searrow 0$, prices are set entirely by agent 2, as if the economy were homogeneous. + +Agent 1 is a price-taker in agent 2's economy. + +Let $m_A^n(\upsilon)$ denote the expected log growth rate of agent $n$'s wealth. + +The difference decomposes into two channels: $$ \lim_{\upsilon \searrow 0} [m_A^1(\upsilon) - m_A^2(\upsilon)] = \underbrace{\lim_{\upsilon \searrow 0} [m_R^1(\upsilon) - m_R^2(\upsilon)]}_{\text{portfolio returns}} -+ \underbrace{\lim_{\upsilon \searrow 0} [(y^2(\upsilon))^{-1} - (y^1(\upsilon))^{-1}]}_{\text{consumption rates}} ++ \underbrace{\lim_{\upsilon \searrow 0} [(y^2(\upsilon))^{-1} - (y^1(\upsilon))^{-1}]}_{\text{consumption-wealth ratios}} $$ (eq:wealth_decomp) +The first term measures how much faster agent 1's portfolio grows. + +The second measures how much less agent 1 consumes out of wealth — a lower consumption-wealth ratio means more saving and faster wealth accumulation. + +When this total difference is positive, agent 1 survives; when negative, she shrinks toward extinction. + +```{exercise} +:label: ex_wealth_decomp + +Derive {eq}`eq:wealth_decomp`. + +Let $A^n$ denote agent $n$'s wealth and $C^n$ her consumption. + +The budget constraint is $dA^n = A^n dR^n - C^n dt$, where $dR^n$ is the return on agent $n$'s portfolio. 
+ +Define the consumption-wealth ratio $c^n = C^n / A^n = (y^n)^{-1}$. + +Show that $d\log A^n = m_R^n \, dt - (y^n)^{-1} dt + \ldots$, so the difference in expected log wealth growth is $m_A^1 - m_A^2 = (m_R^1 - m_R^2) + [(y^2)^{-1} - (y^1)^{-1}]$. +``` + +```{solution-start} ex_wealth_decomp +:class: dropdown +``` + +Dividing the budget constraint by $A^n$: + +$$ +\frac{dA^n}{A^n} = dR^n - (y^n)^{-1} dt. +$$ + +By Itô's lemma, $d\log A^n = \frac{dA^n}{A^n} - \frac{1}{2}\left(\frac{dA^n}{A^n}\right)^2$. + +Write $dR^n = m_R^n \, dt + \sigma_R^n \, dW$ (the portfolio return under $P$). + +Then + +$$ +d\log A^n = \left(m_R^n - (y^n)^{-1} - \tfrac{1}{2}(\sigma_R^n)^2\right) dt + \sigma_R^n \, dW. +$$ + +Taking the difference for agents 1 and 2: + +$$ +m_A^1 - m_A^2 = (m_R^1 - m_R^2) + \left[(y^2)^{-1} - (y^1)^{-1}\right] - \tfrac{1}{2}\left[(\sigma_R^1)^2 - (\sigma_R^2)^2\right]. +$$ + +The volatility terms $\tfrac{1}{2}[(\sigma_R^1)^2 - (\sigma_R^2)^2]$ are absorbed into $m_R^1 - m_R^2$ when we define $m_R^n$ as the expected log portfolio return (i.e., the drift of $\log R^n$ rather than the arithmetic return), giving {eq}`eq:wealth_decomp`. 
+ +```{solution-end} +``` + ### Portfolio returns -The difference in expected logarithmic portfolio returns at the boundary is +At the boundary $\upsilon \searrow 0$, the difference in expected log portfolio returns is $$ -\lim_{\upsilon \searrow 0} [m_R^1 - m_R^2] = \underbrace{\frac{\omega^1 - \omega^2}{\gamma} \cdot \sigma_Y}_{\text{difference in portfolios}} +\lim_{\upsilon \searrow 0} [m_R^1 - m_R^2] += \underbrace{\frac{\omega^1 - \omega^2}{\gamma \sigma_Y}}_{\text{difference in risky shares}} \cdot \underbrace{(\gamma \sigma_Y^2 - \omega^2 \sigma_Y)}_{\text{risk premium}} -- \underbrace{\frac{1}{2}\left(\frac{\omega^1 - \omega^2}{\gamma}\right)^2}_{\text{volatility penalty}} +- \underbrace{\frac{\omega^1 - \omega^2}{\gamma} +\left(\sigma_Y + \frac{\omega^1 - \omega^2}{2\gamma}\right)}_{\text{volatility term}} $$ (eq:portfolio_returns) -This depends **only** on risk aversion $\gamma$, not on the IES. +An optimistic agent ($\omega^1 > \omega^2$) overweights the risky asset by $(\omega^1 - \omega^2)/(\gamma \sigma_Y)$ relative to agent 2 and earns the equity risk premium on that extra exposure. + +The subtracted *volatility penalty* reflects the cost of holding a more extreme portfolio: higher variance of log returns drags down expected log wealth growth. -### Consumption rates +This term depends on risk aversion $\gamma$ but not on the IES, because portfolio choice is determined by risk aversion alone. -The difference in consumption rates at the boundary is +```{exercise} +:label: ex_portfolio_returns + +Derive {eq}`eq:portfolio_returns`. + +At the boundary $\upsilon \searrow 0$, agent $n$'s optimal risky-asset share is $\pi^n = 1 + (\omega^n - \omega^2)/(\gamma \sigma_Y)$ (see {eq}`eq:portfolio`). + +The expected log return on the risky asset under $P$ is $\mu_R = \mu_Y + \gamma \sigma_Y^2 - \omega^2 \sigma_Y - \frac{1}{2}\sigma_Y^2$, and the risk-free rate is $r$. 
+ + The expected log portfolio return is $m_R^n = r + \pi^n(\mu_R - r) - \frac{1}{2}(\pi^n)^2 \sigma_Y^2$. + +Compute $m_R^1 - m_R^2$ and simplify. +``` + +```{solution-start} ex_portfolio_returns +:class: dropdown +``` + +Using $m_R^n = r + \pi^n(\mu_R - r) - \frac{1}{2}(\pi^n)^2 \sigma_Y^2$, the difference is + +$$ +m_R^1 - m_R^2 = (\pi^1 - \pi^2)(\mu_R - r) - \tfrac{1}{2}[(\pi^1)^2 - (\pi^2)^2]\sigma_Y^2. +$$ + +The difference in risky shares is $\pi^1 - \pi^2 = (\omega^1 - \omega^2)/(\gamma \sigma_Y)$. + +The equity premium (log) is $\mu_R - r = \gamma \sigma_Y^2 - \omega^2 \sigma_Y - \frac{1}{2}\sigma_Y^2$, but for the first term we only need the product: + +$$ +(\pi^1 - \pi^2)(\mu_R - r) = \frac{\omega^1 - \omega^2}{\gamma \sigma_Y} \cdot (\gamma \sigma_Y^2 - \omega^2 \sigma_Y) - \frac{\omega^1 - \omega^2}{2\gamma}. +$$ + +For the volatility term, write $(\pi^1)^2 - (\pi^2)^2 = (\pi^1 - \pi^2)(\pi^1 + \pi^2)$ and note $\pi^1 + \pi^2 = 2 + (\omega^1 + \omega^2 - 2\omega^2)/(\gamma \sigma_Y)$. After simplification: + +$$ +\tfrac{1}{2}[(\pi^1)^2 - (\pi^2)^2]\sigma_Y^2 = \frac{\omega^1 - \omega^2}{\gamma}\left(\sigma_Y + \frac{\omega^1 - \omega^2}{2\gamma}\right). +$$ + +Combining the two pieces gives {eq}`eq:portfolio_returns`. + +```{solution-end} +``` + +### Consumption-wealth ratios + +The difference in consumption-wealth ratios at the boundary is $$ \lim_{\upsilon \searrow 0} [(y^2)^{-1} - (y^1)^{-1}] = \frac{1-\rho}{\rho} \left[(\omega^1 - \omega^2)\sigma_Y + \frac{(\omega^1 - \omega^2)^2}{2\gamma}\right] $$ (eq:consumption_rates) -This depends on $\rho$ (and hence the IES) but enters **only** through the consumption-saving -decision. +The term in brackets is the difference in *subjective* expected portfolio returns — what agent 1 believes she earns relative to agent 2. + +The prefactor $(1-\rho)/\rho$ translates this perceived return advantage into a saving response. 
+ +- When IES $> 1$ ($\rho < 1$), the prefactor is positive: a higher perceived return makes the agent save more, because the substitution effect dominates the income effect. +- When IES $< 1$ ($\rho > 1$), the prefactor is negative: the income effect dominates and the agent saves less, working against survival. +- When IES $= 1$ ($\rho = 1$), the two effects cancel and the saving channel vanishes entirely. + +This is the channel through which recursive preferences alter survival outcomes by separating $\gamma$ from $\rho$. + +```{exercise} +:label: ex_consumption_wealth + +Derive {eq}`eq:consumption_rates`. + +In the homogeneous economy populated by agent 2, the consumption-wealth ratio is $(y(0))^{-1} = \beta - (1-\rho)\mu_V^2$, where $\mu_V^2$ is agent 2's expected log return on wealth. Agent 1, as a negligible price-taker, has consumption-wealth ratio $(y^1)^{-1} = \beta - (1-\rho)\mu_V^1$, where $\mu_V^1$ is her own expected log return. + +Use $(y^2)^{-1} - (y^1)^{-1} = (1-\rho)(\mu_V^1 - \mu_V^2)$ and express $\mu_V^1 - \mu_V^2$ in terms of agent 1's *subjective* expected excess return. + +*Hint:* Under agent 1's beliefs, her portfolio earns an extra $(\omega^1 - \omega^2)\sigma_Y + (\omega^1 - \omega^2)^2/(2\gamma)$ in expected log returns relative to agent 2's portfolio. +``` + +```{solution-start} ex_consumption_wealth +:class: dropdown +``` + +The consumption-wealth ratio for agent $n$ satisfies $(y^n)^{-1} = \beta - (1-\rho)\mu_V^n$, where $\mu_V^n$ is the expected log return on agent $n$'s wealth under her own subjective measure. + +Taking the difference: + +$$ +(y^2)^{-1} - (y^1)^{-1} = (1-\rho)(\mu_V^1 - \mu_V^2). +$$ + +Agent 1's subjective expected log portfolio return exceeds agent 2's by the amount she believes she gains from tilting toward the risky asset. 
+ +Her extra risky share is $\pi^1 - 1 = (\omega^1 - \omega^2)/(\gamma\sigma_Y)$, and under her subjective measure $Q^1$ the risky asset's expected excess log return is $(\gamma\sigma_Y^2 + (\omega^1 - \omega^2)\sigma_Y - \omega^2\sigma_Y) - r - \frac{1}{2}\sigma_Y^2$. + +After simplification, the subjective expected log return difference is + +$$ +\mu_V^1 - \mu_V^2 = (\omega^1 - \omega^2)\sigma_Y + \frac{(\omega^1 - \omega^2)^2}{2\gamma}. +$$ + +Substituting and dividing through by $\rho$ (from the relationship between $(y^n)^{-1}$ and $\beta$): + +$$ +(y^2)^{-1} - (y^1)^{-1} = \frac{1-\rho}{\rho}\left[(\omega^1 - \omega^2)\sigma_Y + \frac{(\omega^1 - \omega^2)^2}{2\gamma}\right]. +$$ + +```{solution-end} +``` + +### Two comparative statics + +Survival depends on $\gamma$, $\rho$, and the signal-to-noise ratios $\omega^1 / \sigma_Y$ and $\omega^2 / \sigma_Y$, not on $\omega^1$, $\omega^2$, and $\sigma_Y$ separately. + +The survival conditions do not depend on $\beta$ or $\mu_Y$, which affect the level of consumption and prices but not relative wealth dynamics at the boundary. ```{code-cell} ipython3 -def portfolio_return_diff(omega1, omega2, gamma, sigma_y): +def portfolio_return_diff(ω_1, ω_2, γ, σ_y): """ - Difference in expected log portfolio returns at boundary v → 0. + Difference in expected log portfolio returns at the boundary. 
Parameters ---------- - omega1 : float + ω_1 : float Belief distortion of agent 1 - omega2 : float + ω_2 : float Belief distortion of agent 2 - gamma : float + γ : float Risk aversion - sigma_y : float + σ_y : float Endowment volatility Returns ------- float - Difference in log portfolio returns, decomposed into - (risk_premium_effect, volatility_penalty) + Difference in expected log portfolio returns """ - delta_omega = omega1 - omega2 - portfolio_diff = delta_omega / gamma - risk_premium = gamma * sigma_y**2 - omega2 * sigma_y - risk_premium_effect = portfolio_diff * risk_premium * sigma_y - # Correct formula from the paper: - # (ω1-ω2)/γ * σ_y * (γσ_y² - ω²σ_y) - (1/2)((ω1-ω2)/γ + ω1-ω2)² - # Simplify using Prop 3.4 - diff_portfolios = delta_omega / gamma - rp = gamma * sigma_y - omega2 - volatility_penalty = 0.5 * (delta_omega * sigma_y / gamma - + delta_omega)**2 - total = diff_portfolios * sigma_y * rp - volatility_penalty - return total - - -def consumption_rate_diff(omega1, omega2, gamma, rho, sigma_y): + Δω = ω_1 - ω_2 + risky_share_diff = Δω / (γ * σ_y) + risk_premium = γ * σ_y**2 - ω_2 * σ_y + volatility_term = (Δω / γ) * (σ_y + 0.5 * Δω / γ) + return risky_share_diff * risk_premium - volatility_term + + +def saving_channel(ω_1, ω_2, γ, ρ, σ_y): """ - Difference in consumption rates at boundary v → 0. + Difference in consumption-wealth ratios at the boundary. 
Parameters ---------- - omega1, omega2 : float + ω_1, ω_2 : float Belief distortions - gamma : float + γ : float Risk aversion - rho : float + ρ : float Inverse of IES - sigma_y : float + σ_y : float Endowment volatility Returns ------- float """ - delta_omega = omega1 - omega2 - subjective_return_diff = (delta_omega * sigma_y - + delta_omega**2 / (2 * gamma)) - return (1 - rho) / rho * subjective_return_diff + Δω = ω_1 - ω_2 + subjective_return_diff = Δω * σ_y + Δω**2 / (2 * γ) + return (1 - ρ) / ρ * subjective_return_diff -def survival_drift(omega1, omega2, gamma, rho, sigma_y): +def boundary_drift(ω_1, ω_2, γ, ρ, σ_y): """ - Drift m_ϑ at boundary v → 0, determining survival of agent 1. + Boundary drift m_ϑ when agent 1 becomes negligible. Positive drift means agent 1 survives (repelling boundary). Parameters ---------- - omega1, omega2 : float + ω_1, ω_2 : float Belief distortions of agents 1 and 2 - gamma : float + γ : float Risk aversion - rho : float + ρ : float Inverse of IES - sigma_y : float + σ_y : float Endowment volatility Returns @@ -348,806 +600,626 @@ def survival_drift(omega1, omega2, gamma, rho, sigma_y): float Drift at v = 0 """ - pr = portfolio_return_diff(omega1, omega2, gamma, sigma_y) - cr = consumption_rate_diff(omega1, omega2, gamma, rho, sigma_y) - return gamma * (pr + cr) + return γ * ( + portfolio_return_diff(ω_1, ω_2, γ, σ_y) + + saving_channel(ω_1, ω_2, γ, ρ, σ_y) + ) ``` -## Survival Regions +## Survival regions -A central contribution of {cite}`Borovicka2020` is the characterization of -**survival regions** in the $(\gamma, \rho)$ parameter space. +A central contribution of {cite}`Borovicka2020` is the characterization of survival regions in the $(\gamma, \rho)$ plane. -Under separable CRRA preferences ($\gamma = \rho$), the agent with more accurate beliefs -always dominates --- this is the market selection hypothesis. +Under separable preferences, $\gamma = \rho$, the agent with more accurate beliefs always dominates. 
-Under recursive preferences, all four survival outcomes from {prf:ref}`survival_conditions` -can occur. +Under recursive preferences, all four outcomes in {prf:ref}`survival_conditions` can occur. -Let us compute and plot the survival regions for different levels of belief distortion, -following Figure 2 of {cite}`Borovicka2020`. +Figure 2 in the paper studies the case where agent 2 has correct beliefs, so $\omega^2 = 0$. -We focus on the case where agent 2 has correct beliefs ($\omega^2 = 0$) and agent 1 -has distorted beliefs. +The next cell follows that figure. ```{code-cell} ipython3 -def compute_survival_boundary(omega1, omega2, sigma_y, - gamma_range, boundary='lower'): +def compute_survival_boundary(ω_1, ω_2, σ_y, γ_grid, boundary="lower"): """ - Compute the boundary curve in (γ, ρ) space where survival - condition holds with equality. + Compute the curve in (γ, ρ) space where the boundary drift is zero. - For boundary='lower' (v → 0): drift at v=0 = 0, giving - condition for agent 1's survival. - For boundary='upper' (v → 1): drift at v=1 = 0, giving - condition for agent 2's survival (symmetric). + For boundary='lower', agent 1 is the small agent. + For boundary='upper', agent 2 is the small agent. - Returns ρ as function of γ along the boundary. 
+ Returns + ------- + np.ndarray + Boundary values of ρ as a function of γ """ - rho_boundary = [] - - if boundary == 'lower': - # Agent 1 survival: drift at v→0 = 0 - # portfolio_returns + consumption_rate_diff = 0 - for gamma in gamma_range: - pr = portfolio_return_diff(omega1, omega2, gamma, - sigma_y) - delta_omega = omega1 - omega2 - subj_ret = (delta_omega * sigma_y - + delta_omega**2 / (2 * gamma)) - if abs(subj_ret) < 1e-15: - rho_boundary.append(np.nan) - continue - # pr + (1-ρ)/ρ * subj_ret = 0 - # pr*ρ + subj_ret - ρ*subj_ret = 0 - # ρ(pr - subj_ret) = -subj_ret - # ρ = -subj_ret / (pr - subj_ret) - # = subj_ret / (subj_ret - pr) - denom = subj_ret - pr - if abs(denom) < 1e-15: - rho_boundary.append(np.nan) - else: - rho_val = subj_ret / denom - rho_boundary.append(rho_val) + ρ_boundary = [] + + if boundary == "lower": + small_agent = (ω_1, ω_2) else: - # Agent 2 survival: drift at v→1 = 0 (symmetric) - for gamma in gamma_range: - pr = portfolio_return_diff(omega2, omega1, gamma, - sigma_y) - delta_omega = omega2 - omega1 - subj_ret = (delta_omega * sigma_y - + delta_omega**2 / (2 * gamma)) - if abs(subj_ret) < 1e-15: - rho_boundary.append(np.nan) - continue - denom = subj_ret - pr - if abs(denom) < 1e-15: - rho_boundary.append(np.nan) - else: - rho_val = subj_ret / denom - rho_boundary.append(rho_val) - - return np.array(rho_boundary) -``` + small_agent = (ω_2, ω_1) -```{code-cell} ipython3 -sigma_y = 0.02 + ω_small, ω_large = small_agent + + for γ in γ_grid: + pr = portfolio_return_diff(ω_small, ω_large, γ, σ_y) + Δω = ω_small - ω_large + subj_ret = Δω * σ_y + Δω**2 / (2 * γ) + + if abs(subj_ret) < 1e-14: + ρ_boundary.append(np.nan) + continue + + denom = subj_ret - pr + if abs(denom) < 1e-14: + ρ_boundary.append(np.nan) + else: + ρ_boundary.append(subj_ret / denom) -fig, axes = plt.subplots(2, 2, figsize=(14, 12)) + return np.asarray(ρ_boundary) -# Four cases of belief distortion -cases = [ - (0.25, 0, r'$\omega^1 = 0.25$ (moderate optimism)'), - 
(1.0, 0, r'$\omega^1 = 1.0$ (strong optimism)'), - (5.0, 0, r'$\omega^1 \to \infty$ (extreme optimism / $\sigma_Y \to 0$)'), - (-0.5, 0, r'$\omega^1 = -0.5$ (pessimism)') + +def compute_limit_boundary(γ_grid, boundary="lower"): + """ + Boundary curves for the limit |ω_1| / σ_y -> ∞. + + This is equivalent to the constant-endowment case discussed in the paper. + """ + if boundary == "lower": + return γ_grid / (1 + γ_grid) + + ρ = np.full_like(γ_grid, np.nan, dtype=float) + mask = γ_grid < 1 + ρ[mask] = γ_grid[mask] / (1 - γ_grid[mask]) + return ρ +``` + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Survival regions corresponding to Figure 2 in Borovicka (2020) + name: fig-survival-regions +--- +σ_y = 0.02 +γ_grid = np.linspace(0.01, 6.0, 500) +ρ_max = 2.0 + +panel_specs = [ + ("finite", 0.10, r"$\omega^1 = 0.10$"), + ("finite", 0.20, r"$\omega^1 = 0.20$"), + ("limit", None, r"$|\omega^1| / \sigma_Y \to \infty$"), + ("finite", -0.25, r"$\omega^1 = -0.25$"), ] -gamma_range = np.linspace(0.1, 30, 500) - -for idx, (omega1, omega2, title) in enumerate(cases): - ax = axes[idx // 2][idx % 2] - - # Compute boundaries - rho_lower = compute_survival_boundary(omega1, omega2, sigma_y, - gamma_range, - boundary='lower') - rho_upper = compute_survival_boundary(omega1, omega2, sigma_y, - gamma_range, - boundary='upper') - - # Clean up invalid values - rho_lower = np.clip(rho_lower, 0.01, 30) - rho_upper = np.clip(rho_upper, 0.01, 30) - - # Plot boundaries - ax.plot(gamma_range, rho_lower, 'b--', linewidth=2, - label=r'Agent 1 survival boundary') - ax.plot(gamma_range, rho_upper, 'r-', linewidth=2, - label=r'Agent 2 survival boundary') - - # CRRA diagonal - ax.plot(gamma_range, gamma_range, 'k:', linewidth=1, - label=r'CRRA ($\gamma = \rho$)') - - # Label regions - ax.set_xlabel(r'Risk aversion $\gamma$', fontsize=12) - ax.set_ylabel(r'Inverse of IES $\rho$', fontsize=12) - ax.set_title(title, fontsize=13) - ax.set_xlim(0, 30) - ax.set_ylim(0, 30) - 
ax.legend(fontsize=9, loc='upper left') +fig, axes = plt.subplots(2, 2, figsize=(13, 10), sharex=True, sharey=True) +for idx, (case, value, label) in enumerate(panel_specs): + ax = axes.flat[idx] + + if case == "limit": + ρ_1 = compute_limit_boundary(γ_grid, boundary="lower") + ρ_2 = compute_limit_boundary(γ_grid, boundary="upper") + else: + ρ_1 = compute_survival_boundary(value, 0.0, σ_y, γ_grid, boundary="lower") + ρ_2 = compute_survival_boundary(value, 0.0, σ_y, γ_grid, boundary="upper") + + ρ_1 = np.where((ρ_1 > 0) & (ρ_1 < ρ_max), ρ_1, np.nan) + ρ_2 = np.where((ρ_2 > 0) & (ρ_2 < ρ_max), ρ_2, np.nan) + + lower = np.minimum(ρ_1, ρ_2) + upper = np.maximum(ρ_1, ρ_2) + mask = np.isfinite(lower) & np.isfinite(upper) + + ax.fill_between( + γ_grid, lower, upper, where=mask, color="C2", alpha=0.18, + label="both survive" if idx == 0 else None + ) + ax.plot( + γ_grid, ρ_1, "--", color="C0", lw=2, + label="agent 1 boundary" if idx == 0 else None + ) + ax.plot( + γ_grid, ρ_2, "-", color="C3", lw=2, + label="agent 2 boundary" if idx == 0 else None + ) + ax.plot( + γ_grid, γ_grid, ":", color="black", lw=2, + label=r"$\gamma = \rho$" if idx == 0 else None + ) + + ax.set_title(label, fontsize=12) + ax.set_xlim(0, 6) + ax.set_ylim(0, 2) + ax.set_xlabel(r"risk aversion $\gamma$") + ax.set_ylabel(r"inverse IES $\rho$") + +axes[0, 0].legend(loc="upper left", fontsize=9) plt.tight_layout() plt.show() ``` -The **shaded region** between the two boundaries corresponds to parameter combinations -where both agents coexist in the long run --- a **nondegenerate stationary distribution** -of wealth exists. +Each panel plots two curves in the $(\gamma, \rho)$ plane for a different value of agent 1's belief distortion $\omega^1$ (agent 2 has correct beliefs, $\omega^2 = 0$). -Key observations: +- The dashed curve (blue) is where the boundary drift at $\upsilon = 0$ equals zero — condition (i) in {prf:ref}`survival_conditions`. 
+- The solid curve (red) is where the boundary drift at $\upsilon = 1$ equals zero — condition (ii). +- The shaded region between the two curves is where both agents survive. +- The dotted diagonal $\gamma = \rho$ is the separable CRRA case, along which the agent with more accurate beliefs always dominates. -* Along the CRRA diagonal ($\gamma = \rho$, dotted line), the agent with more accurate - beliefs always dominates, confirming {cite}`Sandroni2000` and {cite}`Blume_Easley2006` +Moderate optimism ($\omega^1 = 0.10$) produces a narrow coexistence region. -* The coexistence region lies in the empirically relevant part of the parameter space - where $\gamma > \rho$ (i.e., risk aversion exceeds the inverse of IES) +Stronger optimism ($\omega^1 = 0.20$) widens it substantially. -* As optimism increases, the coexistence region expands +In the limit $|\omega^1|/\sigma_Y \to \infty$ (bottom-left), the boundaries simplify to closed-form expressions. -* A pessimistic agent can survive only when IES is sufficiently high and risk aversion - is not too large +Pessimistic distortions ($\omega^1 = -0.25$, bottom-right) can also survive, but only in a much narrower part of the parameter space. +## Three survival channels -## Three Survival Channels - -Let us now visualize the contribution of each survival channel to the total survival -drift, varying one parameter at a time. +The decomposition above can be visualized directly. ```{code-cell} ipython3 -def decompose_survival(omega1, omega2, gamma_vals, rho, sigma_y): +def decompose_survival(ω_1, ω_2, γ_grid, ρ, σ_y): """ - Decompose survival drift into three channels. - - Returns arrays for: - - risk premium channel - - volatility penalty - - saving channel + Decompose the wealth-growth differential in proposition 3.4. 
""" - delta_omega = omega1 - omega2 - - risk_premium_ch = np.zeros_like(gamma_vals) - vol_penalty_ch = np.zeros_like(gamma_vals) - saving_ch = np.zeros_like(gamma_vals) - - for i, gamma in enumerate(gamma_vals): - # Portfolio difference × risk premium - diff_port = delta_omega / gamma - rp = gamma * sigma_y - omega2 - risk_premium_ch[i] = diff_port * sigma_y * rp - - # Volatility penalty (always negative for survival) - vol_penalty_ch[i] = -0.5 * (delta_omega * sigma_y / gamma - + delta_omega)**2 - - # Saving channel - subj_ret = (delta_omega * sigma_y - + delta_omega**2 / (2 * gamma)) - saving_ch[i] = (1 - rho) / rho * subj_ret - - total = risk_premium_ch + vol_penalty_ch + saving_ch - return risk_premium_ch, vol_penalty_ch, saving_ch, total - - -# Parameters -sigma_y = 0.02 -omega1 = 0.25 -omega2 = 0.0 # correct beliefs -rho = 0.67 # IES = 1.5 - -gamma_vals = np.linspace(0.5, 25, 300) - -rp, vp, sc, total = decompose_survival(omega1, omega2, gamma_vals, - rho, sigma_y) - -fig, ax = plt.subplots(figsize=(12, 7)) - -ax.plot(gamma_vals, rp, 'b-', linewidth=2, - label='Risk premium channel') -ax.plot(gamma_vals, vp, 'r--', linewidth=2, - label='Volatility penalty') -ax.plot(gamma_vals, sc, 'g-.', linewidth=2, - label='Saving channel') -ax.plot(gamma_vals, total, 'k-', linewidth=3, - label='Total survival drift') -ax.axhline(0, color='gray', linewidth=0.5) -ax.set_xlabel(r'Risk aversion $\gamma$', fontsize=13) -ax.set_ylabel('Contribution to survival drift', fontsize=13) -ax.set_title( - rf'Decomposition of survival channels ($\omega^1={omega1}$, ' - rf'$\omega^2={omega2}$, IES$={1/rho:.1f}$, ' - rf'$\sigma_Y={sigma_y}$)', - fontsize=13 + Δω = ω_1 - ω_2 + risk_premium_term = Δω * (γ_grid * σ_y - ω_2) / γ_grid + volatility_term = -(Δω / γ_grid) * (σ_y + 0.5 * Δω / γ_grid) + saving_term = (1 - ρ) / ρ * (Δω * σ_y + Δω**2 / (2 * γ_grid)) + total = risk_premium_term + volatility_term + saving_term + return risk_premium_term, volatility_term, saving_term, total + + +ω_1 = 
0.25 +ω_2 = 0.0 +ρ = 0.67 +σ_y = 0.02 +γ_grid = np.linspace(0.5, 25.0, 300) + +risk_term, vol_term, save_term, total = decompose_survival( + ω_1, ω_2, γ_grid, ρ, σ_y ) -ax.legend(fontsize=11) + +fig, ax = plt.subplots(figsize=(11, 6)) +ax.plot(γ_grid, risk_term, color="C0", lw=2, label="risk premium term") +ax.plot(γ_grid, vol_term, "--", color="C3", lw=2, label="volatility term") +ax.plot(γ_grid, save_term, "-.", color="C2", lw=2, label="saving term") +ax.plot(γ_grid, total, color="black", lw=2, label="total") +ax.axhline(0, color="gray", lw=1) +ax.set_xlabel(r"risk aversion $\gamma$") +ax.set_ylabel("contribution to wealth-growth differential") +ax.legend() plt.tight_layout() plt.show() ``` -The figure reveals the distinct roles of the three channels: - -* The **volatility penalty** (red dashed) is dominant at low risk aversion --- speculative - portfolios generate volatile returns that hurt the incorrect agent +This figure decomposes the boundary drift at $\upsilon = 0$ into three terms for an optimistic agent ($\omega^1 = {0.25}$, $\omega^2 = 0$) with IES $= 1/\rho \approx 1.49$ and $\sigma_Y = 0.02$. -* The **risk premium channel** (blue) increases with risk aversion --- the more optimistic - agent earns a higher return by holding more of the risky asset +- The risk premium term (blue) is positive throughout because the optimistic agent overweights the risky asset and earns the equity premium. +- The volatility term (red dashed) is negative and large at low $\gamma$, reflecting the cost of holding a volatile portfolio. +- The saving term (green dash-dot) is positive when IES $> 1$ because the optimistic agent perceives a high return on wealth and saves more aggressively. +- The total (black) crosses zero at the critical $\gamma$ below which the volatility penalty dominates and the agent cannot survive. 
-* The **saving channel** (green) provides a constant positive lift when IES $> 1$ --- - the optimistic agent saves more in response to her perceived high returns +## Varying the IES - -## Varying IES - -The intertemporal elasticity of substitution plays a critical role in survival outcomes. +The sign of the saving term is pinned down by the IES. ```{code-cell} ipython3 -fig, axes = plt.subplots(1, 3, figsize=(18, 5)) +--- +mystnb: + figure: + caption: Boundary decomposition for different IES values + name: fig-survival-ies-panels +--- +fig, axes = plt.subplots(1, 3, figsize=(16, 4.5), sharey=True) -gamma_fixed = 10.0 -omega1 = 0.25 -omega2 = 0.0 -sigma_y = 0.02 +ω_1 = 0.25 +ω_2 = 0.0 +σ_y = 0.02 +γ_grid = np.linspace(0.5, 25.0, 300) ies_values = [0.5, 1.0, 1.5] -ies_labels = ['IES = 0.5 (inelastic)', 'IES = 1.0 (log)', - 'IES = 1.5 (elastic)'] -gamma_range = np.linspace(0.5, 25, 300) - -for idx, (ies, label) in enumerate(zip(ies_values, ies_labels)): - rho = 1.0 / ies - - rp, vp, sc, total = decompose_survival(omega1, omega2, - gamma_range, - rho, sigma_y) +for idx, ies in enumerate(ies_values): + ρ = 1.0 / ies + risk_term, vol_term, save_term, total = decompose_survival( + ω_1, ω_2, γ_grid, ρ, σ_y + ) ax = axes[idx] - ax.plot(gamma_range, rp, 'b-', linewidth=2, - label='Risk premium') - ax.plot(gamma_range, vp, 'r--', linewidth=2, - label='Volatility penalty') - ax.plot(gamma_range, sc, 'g-.', linewidth=2, - label='Saving channel') - ax.plot(gamma_range, total, 'k-', linewidth=3, - label='Total') - ax.axhline(0, color='gray', linewidth=0.5) - ax.set_xlabel(r'Risk aversion $\gamma$', fontsize=12) - ax.set_ylabel('Contribution', fontsize=12) - ax.set_title(label, fontsize=13) - ax.legend(fontsize=9) - + ax.plot(γ_grid, risk_term, color="C0", lw=2, label="risk premium") + ax.plot(γ_grid, vol_term, "--", color="C3", lw=2, label="volatility") + ax.plot(γ_grid, save_term, "-.", color="C2", lw=2, label="saving") + ax.plot(γ_grid, total, color="black", lw=2, 
label="total") + ax.axhline(0, color="gray", lw=1) + ax.set_title(f"IES = {ies:.1f}", fontsize=12) + ax.set_xlabel(r"risk aversion $\gamma$") + ax.set_ylabel("contribution") + +axes[0].legend(fontsize=9) plt.tight_layout() plt.show() ``` -Key insights: +Each panel shows the same three-term decomposition as the previous figure, but now for three different values of the IES ($\omega^1 = 0.25$, $\omega^2 = 0$, $\sigma_Y = 0.02$). + +- Left panel (IES $= 0.5$): the saving term is negative, so the optimistic agent actually saves less, working against survival. +- Center panel (IES $= 1.0$): the saving term vanishes entirely, reproducing the separable benchmark. +- Right panel (IES $= 1.5$): the saving term is positive and shifts the total drift upward, expanding the range of $\gamma$ values for which the optimistic agent survives. -* When **IES $= 1$** (center), the saving channel vanishes: consumption-wealth ratios - are constant and equal to $\beta$, as in the logarithmic case. - Only the risk premium and volatility channels matter. +## Asymptotic results -* When **IES $> 1$** (right), the saving channel is positive for the optimistic agent: - she perceives high expected returns and responds by saving more, helping her survive. +Borovicka derives several useful asymptotic results. -* When **IES $< 1$** (left), the saving channel reverses direction: higher perceived - returns lead to *lower* saving (the income effect dominates), hurting the - optimistic agent's survival. +1. As $\gamma \searrow 0$, each agent dominates with strictly positive probability. +1. As $\gamma \nearrow \infty$, the relatively more optimistic agent dominates. +1. As $\rho \searrow 0$, the relatively more optimistic agent always survives. +1. As $\rho \nearrow \infty$, a nondegenerate long-run equilibrium cannot exist. +The next figure illustrates the first result by plotting both boundary drifts as $\gamma$ becomes small. 
-## Asymptotic Results +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Boundary drifts for small risk aversion + name: fig-boundary-drifts-small-gamma +--- +ω_1 = 0.25 +ω_2 = 0.0 +ρ = 0.67 +σ_y = 0.02 +γ_grid = np.linspace(0.05, 5.0, 300) + +drift_at_0 = np.array([boundary_drift(ω_1, ω_2, γ, ρ, σ_y) for γ in γ_grid]) +drift_at_1 = np.array([-boundary_drift(ω_2, ω_1, γ, ρ, σ_y) for γ in γ_grid]) + +fig, ax = plt.subplots(figsize=(10, 5)) +ax.plot(γ_grid, drift_at_0, color="C0", lw=2, label=r"$\upsilon \to 0$") +ax.plot(γ_grid, drift_at_1, "--", color="C3", lw=2, label=r"$\upsilon \to 1$") +ax.axhline(0, color="gray", lw=1) +ax.set_xlabel(r"risk aversion $\gamma$") +ax.set_ylabel("boundary drift") +ax.legend() +plt.tight_layout() +plt.show() +``` -{cite}`Borovicka2020` establishes four key asymptotic results: +This figure plots the two boundary drifts as a function of $\gamma$ ($\omega^1 = 0.25$, $\omega^2 = 0$, IES $\approx 1.49$). -**(a) Near risk neutrality** ($\gamma \searrow 0$): each agent dominates with strictly positive -probability. -Low risk aversion encourages speculative portfolio positions. -The volatile returns create a diverging force --- one agent must eventually become -extinct, but which one depends on the realized path. +- The solid blue curve is the drift $m_\vartheta$ at $\upsilon \to 0$ (agent 1 near extinction); coexistence requires this to be positive (condition (i)). +- The dashed red curve is the drift $m_\vartheta$ at $\upsilon \to 1$ (agent 2 near extinction); coexistence requires this to be negative (condition (ii)). -**(b) High risk aversion** ($\gamma \nearrow \infty$): the relatively more optimistic agent -always dominates. -The risk premium channel dominates, and the pessimistic agent pays too high a price -for insurance. +The figure illustrates asymptotic result 1. -**(c) High IES** ($\rho \searrow 0$): the relatively more optimistic agent always survives. -The saving channel is strong enough to prevent her extinction. 
-Whether the pessimistic agent also survives depends on risk aversion. +For small $\gamma$, the blue curve is negative and the red curve is also negative. -**(d) Low IES** ($\rho \nearrow \infty$): a nondegenerate long-run equilibrium cannot exist. -Inelastic preferences cause the saving channel to work against survival of the -small agent, regardless of identity. +Both drifts push the Pareto share in the same direction — toward $\upsilon = 1$ — so whichever agent happens to get ahead early will dominate. -```{code-cell} ipython3 -# Illustrate asymptotic result (a): near risk neutrality -fig, ax = plt.subplots(figsize=(10, 6)) - -omega1 = 0.25 -omega2 = 0.0 -sigma_y = 0.02 -rho = 0.67 # IES = 1.5 - -# Show drift at both boundaries as function of gamma -gamma_vals = np.linspace(0.05, 5, 300) - -drift_v0 = np.array([survival_drift(omega1, omega2, g, rho, sigma_y) - for g in gamma_vals]) -# Drift at v=1 by swapping agents -drift_v1 = np.array([survival_drift(omega2, omega1, g, rho, sigma_y) - for g in gamma_vals]) - -ax.plot(gamma_vals, drift_v0, 'b-', linewidth=2, - label=r'Drift at $\upsilon \to 0$ (agent 1 survival)') -ax.plot(gamma_vals, -drift_v1, 'r--', linewidth=2, - label=r'Drift at $\upsilon \to 1$ (agent 2 survival)') -ax.axhline(0, color='gray', linewidth=0.5) -ax.set_xlabel(r'Risk aversion $\gamma$', fontsize=13) -ax.set_ylabel('Survival drift', fontsize=13) -ax.set_title('Boundary drifts as risk aversion varies', fontsize=13) -ax.legend(fontsize=11) -plt.tight_layout() -plt.show() -``` +This is outcome (d) in {prf:ref}`survival_conditions`: neither boundary is repelling, so each agent dominates with strictly positive probability depending on the realized Brownian path. + +As $\gamma$ increases past roughly 1, the blue curve crosses zero and becomes positive while the red curve stays negative. +Now both boundaries are repelling and we enter the coexistence region — outcome (a). 
-## The Separable Case: CRRA Benchmark +## The separable case -Under separable CRRA preferences ($\gamma = \rho$), the dynamics of the log-odds -Pareto share $\vartheta$ become a Brownian motion with constant drift: +When $\gamma = \rho$, the model collapses to the separable CRRA benchmark. + +In that case, the log-odds process becomes $$ -d\vartheta_t = \frac{1}{2}\left[(\omega^2)^2 - (\omega^1)^2\right] dt + (\omega^1 - \omega^2) dW_t +d\vartheta_t = \frac{1}{2}\left[(\omega^2)^2 - (\omega^1)^2\right] dt + (\omega^1 - \omega^2) dW_t . $$ -The drift does not depend on the state $\upsilon$ and is determined entirely by the -**relative entropy** (Kullback-Leibler divergence) of the agents' beliefs: -$\frac{1}{2}|\omega^n|^2$. +The drift is constant and depends only on the relative entropy of the two belief distortions. + +The agent with the smaller $|\omega^n|$ dominates under $P$. -The agent with small $|\omega^n|$ --- more accurate beliefs --- always dominates. +If the two agents have equal magnitudes of belief distortions, neither becomes extinct almost surely, but no nondegenerate stationary wealth distribution exists. ```{code-cell} ipython3 -def simulate_crra_pareto(omega1, omega2, T, dt, n_paths, seed=42): +--- +mystnb: + figure: + caption: Pareto-share paths in the separable benchmark + name: fig-crra-pareto-paths +--- +def simulate_crra_pareto(ω_1, ω_2, T, dt, n_paths, seed=42): """ - Simulate Pareto share dynamics under CRRA (γ = ρ). - - Parameters - ---------- - omega1, omega2 : float - Belief distortions - T : float - Time horizon - dt : float - Time step - n_paths : int - Number of sample paths - seed : int - Random seed - - Returns - ------- - t_grid : array - Time grid - v_paths : array - Pareto share paths, shape (n_paths, n_steps) + Simulate Pareto-share dynamics in the separable benchmark. 
""" rng = np.random.default_rng(seed) n_steps = int(T / dt) t_grid = np.linspace(0, T, n_steps + 1) - # Drift and vol of log-odds - drift = 0.5 * (omega2**2 - omega1**2) - vol = omega1 - omega2 + drift = 0.5 * (ω_2**2 - ω_1**2) + volatility = ω_1 - ω_2 - # Initial log-odds (v0 = 0.5 -> theta0 = 0) - theta = np.zeros((n_paths, n_steps + 1)) - dW = rng.normal(0, np.sqrt(dt), (n_paths, n_steps)) + θ = np.zeros((n_paths, n_steps + 1)) + dW = rng.normal(0.0, np.sqrt(dt), size=(n_paths, n_steps)) for t in range(n_steps): - theta[:, t+1] = theta[:, t] + drift * dt + vol * dW[:, t] + θ[:, t + 1] = θ[:, t] + drift * dt + volatility * dW[:, t] - # Convert to Pareto share - v_paths = 1.0 / (1.0 + np.exp(-theta)) + υ_paths = 1.0 / (1.0 + np.exp(-θ)) + return t_grid, υ_paths - return t_grid, v_paths +ω_1 = 0.10 +ω_2 = 0.0 +t_grid, υ_paths = simulate_crra_pareto(ω_1, ω_2, T=200, dt=0.01, n_paths=50) -# Simulate -T = 200 -dt = 0.01 -n_paths = 50 +fig, ax = plt.subplots(figsize=(11, 5)) -omega1 = 0.1 # slightly optimistic (incorrect) -omega2 = 0.0 # correct beliefs - -t_grid, v_paths = simulate_crra_pareto(omega1, omega2, T, dt, - n_paths) - -fig, ax = plt.subplots(figsize=(12, 6)) - -for i in range(min(20, n_paths)): - ax.plot(t_grid, v_paths[i], alpha=0.3, linewidth=0.5) +for i in range(20): + ax.plot(t_grid, υ_paths[i], color="C0", alpha=0.25, lw=1) -ax.axhline(0.5, color='gray', linestyle=':', alpha=0.5) -ax.set_xlabel('Time $t$', fontsize=13) -ax.set_ylabel(r'Pareto share $\upsilon_t$', fontsize=13) -ax.set_title( - rf'CRRA case ($\gamma = \rho$): Agent 2 (correct, $\omega^2=0$) ' - rf'dominates over agent 1 ($\omega^1={omega1}$)', - fontsize=13 -) +ax.axhline(0.5, color="gray", linestyle=":", lw=1) +ax.set_xlabel("time") +ax.set_ylabel(r"Pareto share $\upsilon_t$") ax.set_ylim(0, 1) plt.tight_layout() plt.show() ``` -Under separable preferences, agent 2 (with correct beliefs) always drives agent 1's -Pareto share to zero. 
- +This figure simulates 20 sample paths of the Pareto share $\upsilon_t$ under separable CRRA preferences ($\gamma = \rho$) with $\omega^1 = 0.10$ and $\omega^2 = 0$. -## Economy with Constant Aggregate Endowment +Agent 2 has correct beliefs, so the log-odds drift is negative and all paths trend toward $\upsilon = 0$. -An illuminating special case arises when aggregate endowment is constant -($\mu_Y = \sigma_Y = 0$). -In this economy, agents trade purely for **speculative** motives. +Agent 1 is driven to extinction — the classical market-selection result of {cite:t}`Blume_Easley2006`. -The survival results do not depend on $\mu_Y$ or $\sigma_Y$ independently but only on -the ratio $\omega^n / \sigma_Y$. -The limit $\sigma_Y \to 0$ with $\omega^1 \neq 0$ thus isolates the saving channel. +## Constant aggregate endowment -In this case: +Section IV.D.3 studies the limiting case in which aggregate endowment is constant, so $\mu_Y = \sigma_Y = 0$. -* The risk premium is zero (no aggregate risk) -* The speculative volatility channel is present but muted at high risk aversion -* The saving channel alone can generate survival of the incorrect agent when IES $> 1$ +In the notation of the paper, this case is equivalent to the limit $|\omega^1| / \sigma_Y \to \infty$ studied in the bottom-left panel of figure 2. 
-```{code-cell} ipython3 -# Show survival regions for the limiting case ω/σ_y → ∞ -# (equivalent to σ_y → 0 or ω → ∞) - -fig, ax = plt.subplots(figsize=(10, 8)) - -gamma_range = np.linspace(0.1, 30, 500) - -# In the limit, survival of agent 1 requires IES > 1 -# i.e., ρ < 1 -ax.axhline(1.0, color='b', linestyle='--', linewidth=2, - label=r'Agent 1 survival: $\rho < 1$ (IES $> 1$)') - -# Agent 2 always survives (correct beliefs, no risk premium cost) -# The boundary is the CRRA line -ax.plot(gamma_range, gamma_range, 'k:', linewidth=1, - label=r'CRRA ($\gamma = \rho$)') - -# Shade coexistence region -ax.fill_between(gamma_range, 0, np.minimum(1.0, gamma_range), - alpha=0.2, color='green', - label='Both agents survive') -ax.fill_between(gamma_range, np.minimum(1.0, gamma_range), - np.ones_like(gamma_range), - where=gamma_range > 1, - alpha=0.2, color='blue', - label='Both survive (above CRRA, below ρ=1)') - -ax.set_xlabel(r'Risk aversion $\gamma$', fontsize=13) -ax.set_ylabel(r'Inverse of IES $\rho$', fontsize=13) -ax.set_title( - r'Survival regions: $\sigma_Y \to 0$ (pure speculation)', - fontsize=13) -ax.set_xlim(0, 30) -ax.set_ylim(0, 10) -ax.legend(fontsize=10, loc='upper right') -plt.tight_layout() -plt.show() -``` +The point of the exercise is that the survival results do not rely on unbounded aggregate endowment. -In the economy without aggregate risk, IES $> 1$ is sufficient for the incorrect agent -to survive when risk aversion is sufficiently high. -This is the pure saving channel at work. +Even with deterministic aggregate consumption, agents can trade for purely speculative motives in complete markets. +As the negligible agent faces prices generated by the large agent, she can choose a speculative portfolio with a high *subjective* expected return. -## Asset Pricing Implications +When IES is above one, that high perceived return raises her saving rate and can allow her to outsave extinction. 
-{cite}`Borovicka2020` also shows that as the Pareto share of one agent becomes negligible, -current asset prices converge to those in a homogeneous economy populated by the -large agent. +This is the pure saving channel in isolation. -### Prices at the boundary +## Asset pricing implications -As $\upsilon \searrow 0$ (agent 2 dominates): +As one agent becomes negligible, current prices converge to those of the homogeneous economy populated by the large agent. -**Risk-free rate:** +When agent 2 is the large agent, proposition 5.1 implies $$ -\lim_{\upsilon \searrow 0} r(\upsilon) = \beta + \rho \mu_Y + \omega^2 \sigma_Y -+ \frac{1}{2}(1-\gamma)\sigma_Y^2 - \frac{1}{2}\gamma \sigma_Y^2 +\lim_{\upsilon \searrow 0} r(\upsilon) += \beta + \rho \mu_Y + \omega^2 \sigma_Y ++ \frac{1}{2} (1 - \gamma) \sigma_Y^2 +- \frac{1}{2} \gamma \sigma_Y^2 $$ (eq:riskfree) -**Wealth-consumption ratio:** +and $$ -\lim_{\upsilon \searrow 0} y(\upsilon) = \left[\beta - (1-\rho)\left(\mu_Y -+ \omega^2 \sigma_Y + \frac{1}{2}(1-\gamma)\sigma_Y^2\right)\right]^{-1} +\lim_{\upsilon \searrow 0} y(\upsilon) += \left[ +\beta - (1 - \rho) +\left( +\mu_Y + \omega^2 \sigma_Y + \frac{1}{2} (1 - \gamma) \sigma_Y^2 +\right) +\right]^{-1} . $$ (eq:wc_ratio) -### Portfolio choice of the negligible agent - -The small agent's portfolio share in the risky asset converges to +The aggregate wealth dynamics also converge to those of the homogeneous economy: $$ -\lim_{\upsilon \searrow 0} \pi^1(\upsilon) = 1 + \frac{\omega^1 - \omega^2}{\gamma \sigma_Y} -$$ (eq:portfolio) - -An optimistic agent ($\omega^1 > \omega^2$) holds a **leveraged** position ($\pi^1 > 1$). +\lim_{\upsilon \searrow 0} m_A(\upsilon) = \mu_Y, +\qquad +\lim_{\upsilon \searrow 0} \sigma_A(\upsilon) = \sigma_Y . +$$ -A pessimistic agent ($\omega^1 < \omega^2$) **shorts** the risky asset when -$\omega^1 - \omega^2 < -\gamma \sigma_Y$. +Proposition 5.3 then gives the negligible agent's own consumption-saving and portfolio choices. 
-```{code-cell} ipython3 -# Portfolio share of agent 1 as function of belief distortion -fig, ax = plt.subplots(figsize=(10, 6)) +Her consumption-wealth ratio converges to -omega2 = 0.0 -sigma_y = 0.02 +$$ +\lim_{\upsilon \searrow 0} (y^1(\upsilon))^{-1} += (y(0))^{-1} +- \frac{1-\rho}{\rho} +\left[ +(\omega^1 - \omega^2)\sigma_Y ++ \frac{(\omega^1 - \omega^2)^2}{2 \gamma} +\right] . +$$ -omega1_range = np.linspace(-0.5, 1.0, 300) +The small agent's risky-asset share converges to -for gamma in [2, 5, 10, 20]: - pi1 = 1 + (omega1_range - omega2) / (gamma * sigma_y) - ax.plot(omega1_range, pi1, linewidth=2, - label=rf'$\gamma = {gamma}$') +$$ +\lim_{\upsilon \searrow 0} \pi^1(\upsilon) += 1 + \frac{\omega^1 - \omega^2}{\gamma \sigma_Y} . +$$ (eq:portfolio) -ax.axhline(1.0, color='gray', linestyle=':', alpha=0.5) -ax.axhline(0.0, color='gray', linestyle=':', alpha=0.5) -ax.axvline(0.0, color='gray', linestyle=':', alpha=0.5) +Hence optimism implies leverage, while sufficiently strong pessimism implies shorting. 
-ax.set_xlabel(r'Belief distortion $\omega^1$', fontsize=13) -ax.set_ylabel(r'Portfolio share $\pi^1$', fontsize=13) -ax.set_title( - 'Portfolio share of negligible agent in risky asset', - fontsize=13) -ax.legend(fontsize=11) +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Limiting risky-asset shares of the small agent + name: fig-limiting-portfolio-shares +--- +ω_2 = 0.0 +σ_y = 0.02 +ω_grid = np.linspace(-0.5, 1.0, 300) + +fig, ax = plt.subplots(figsize=(10, 5)) + +for γ in [2, 5, 10, 20]: + π_1 = 1 + (ω_grid - ω_2) / (γ * σ_y) + ax.plot(ω_grid, π_1, lw=2, label=rf"$\gamma = {γ}$") + +ax.axhline(1.0, color="gray", linestyle=":", lw=1) +ax.axhline(0.0, color="gray", linestyle=":", lw=1) +ax.axvline(0.0, color="gray", linestyle=":", lw=1) +ax.set_xlabel(r"belief distortion $\omega^1$") +ax.set_ylabel(r"risky share $\pi^1$") +ax.legend() plt.tight_layout() plt.show() ``` -Key observations: - -* At $\omega^1 = 0$ (correct beliefs), the agent holds the market portfolio ($\pi^1 = 1$) +This figure plots the limiting risky-asset share $\pi^1$ of the negligible agent as a function of her belief distortion $\omega^1$ ($\omega^2 = 0$, $\sigma_Y = 0.02$), for four levels of risk aversion. -* Higher risk aversion reduces the speculative position toward the market portfolio +At $\omega^1 = 0$ the agent agrees with agent 2 and holds the market portfolio ($\pi^1 = 1$). -* A pessimistic agent with low risk aversion may take a large short position, - generating the volatile returns needed for the saving channel to operate +Optimism ($\omega^1 > 0$) leads to leverage ($\pi^1 > 1$), while sufficient pessimism ($\omega^1 < 0$) leads to shorting ($\pi^1 < 0$). +Higher risk aversion compresses these deviations toward one. -## Optimistic versus Pessimistic Distortions +## Optimistic and pessimistic distortions -A striking feature of the model is that optimistic and pessimistic belief distortions -have **asymmetric** effects on survival. 
+Optimistic and pessimistic beliefs affect survival asymmetrically. -An optimistic agent ($\omega^1 > 0$) benefits from both the risk premium channel -(she holds more of the risky asset and earns the risk premium) and the saving -channel (she perceives high returns and saves more under IES $> 1$). +An optimistic agent benefits from the risk premium term and, when IES $> 1$, from the saving term as well. -A pessimistic agent ($\omega^1 < 0$) is disadvantaged by the risk premium channel -(she holds less risky asset and foregoes the premium). -However, she can potentially survive through the saving channel if she shorts the -risky asset aggressively enough to perceive a high expected return on her own portfolio. +A pessimistic agent gives up the risk premium and can survive only if the saving effect is strong enough to offset that loss. ```{code-cell} ipython3 -fig, axes = plt.subplots(1, 2, figsize=(14, 6)) +--- +mystnb: + figure: + caption: Total boundary drift for optimistic and pessimistic distortions + name: fig-optimistic-pessimistic-drifts +--- +fig, axes = plt.subplots(1, 2, figsize=(14, 5), sharey=True) -sigma_y = 0.02 -omega2 = 0.0 -rho = 0.67 # IES = 1.5 -gamma_range = np.linspace(0.5, 25, 300) +σ_y = 0.02 +ω_2 = 0.0 +ρ = 0.67 +γ_grid = np.linspace(0.5, 25.0, 300) -# Optimistic agent ax = axes[0] -for omega1 in [0.1, 0.25, 0.5, 1.0]: - rp, vp, sc, total = decompose_survival(omega1, omega2, - gamma_range, - rho, sigma_y) - ax.plot(gamma_range, total, linewidth=2, - label=rf'$\omega^1 = {omega1}$') - -ax.axhline(0, color='gray', linewidth=0.5) -ax.set_xlabel(r'Risk aversion $\gamma$', fontsize=12) -ax.set_ylabel('Total survival drift', fontsize=12) -ax.set_title('Optimistic agent 1', fontsize=13) -ax.legend(fontsize=10) - -# Pessimistic agent +for ω_1 in [0.1, 0.25, 0.5, 1.0]: + _, _, _, total = decompose_survival(ω_1, ω_2, γ_grid, ρ, σ_y) + ax.plot(γ_grid, total, lw=2, label=rf"$\omega^1 = {ω_1}$") +ax.axhline(0, color="gray", lw=1) 
+ax.set_title("optimistic", fontsize=12) +ax.set_xlabel(r"risk aversion $\gamma$") +ax.set_ylabel("boundary drift") +ax.legend(fontsize=9) + ax = axes[1] -for omega1 in [-0.1, -0.25, -0.5, -1.0]: - rp, vp, sc, total = decompose_survival(omega1, omega2, - gamma_range, - rho, sigma_y) - ax.plot(gamma_range, total, linewidth=2, - label=rf'$\omega^1 = {omega1}$') - -ax.axhline(0, color='gray', linewidth=0.5) -ax.set_xlabel(r'Risk aversion $\gamma$', fontsize=12) -ax.set_ylabel('Total survival drift', fontsize=12) -ax.set_title('Pessimistic agent 1', fontsize=13) -ax.legend(fontsize=10) +for ω_1 in [-0.1, -0.25, -0.5, -1.0]: + _, _, _, total = decompose_survival(ω_1, ω_2, γ_grid, ρ, σ_y) + ax.plot(γ_grid, total, lw=2, label=rf"$\omega^1 = {ω_1}$") +ax.axhline(0, color="gray", lw=1) +ax.set_title("pessimistic", fontsize=12) +ax.set_xlabel(r"risk aversion $\gamma$") +ax.legend(fontsize=9) plt.tight_layout() plt.show() ``` -For the optimistic agent (left), survival drift turns positive at moderate risk -aversion and stays positive. - -For the pessimistic agent (right), survival drift is negative for high risk aversion -and becomes positive only at intermediate risk aversion levels --- and only when the -belief distortion is large enough to induce an aggressive short position. +Both panels plot the total boundary drift at $\upsilon = 0$ as a function of $\gamma$ (IES $\approx 1.49$, $\omega^2 = 0$). +Where the curve is positive, agent 1 survives near extinction. +- Left panel (optimistic agent): larger $\omega^1$ means a bigger bet on the risky asset, so the volatility penalty dominates at low $\gamma$ but the drift turns positive once $\gamma$ is large enough. +- Right panel (pessimistic agent): a pessimistic agent gives up the risk premium by underweighting the risky asset, so the drift is negative for most of the parameter space and survival requires saving motives strong enough to offset the portfolio losses. 
-## Long-Run Consumption Distribution +## Long-run consumption distribution -When both agents survive, the stationary distribution of consumption shares provides -information about the typical wealth allocation. +When both agents survive, the Pareto share keeps moving across the whole interval $(0, 1)$. -{cite}`Borovicka2020` shows that when agent $n$ survives, she attains an -arbitrarily large consumption share $z^n \in (0, 1)$ with probability one at some -future date. +The next simulation is only a toy approximation. -Let us simulate the Pareto share dynamics in a simplified model to illustrate -the ergodic behavior. +It interpolates the drift between its two boundary values, so it illustrates the recurrence logic without solving the full equilibrium ODE. ```{code-cell} ipython3 -def simulate_pareto_share(omega1, omega2, gamma, rho, sigma_y, - beta, T, dt, n_paths=20, seed=42): +--- +mystnb: + figure: + caption: A toy stationary Pareto-share simulation + name: fig-toy-stationary-pareto-share +--- +def simulate_pareto_share_toy(ω_1, ω_2, γ, ρ, σ_y, T, dt, n_paths=20, seed=42): """ - Simulate Pareto share dynamics with state-dependent drift. - - This uses a simplified approximation where the endogenous - discount rate difference is computed from the boundary formulas. - - Parameters - ---------- - omega1, omega2 : float - Belief distortions - gamma, rho : float - Preference parameters - sigma_y : float - Endowment volatility - beta : float - Time preference - T : float - Time horizon - dt : float - Time step - n_paths : int - Number of paths - seed : int - Random seed - - Returns - ------- - t_grid, v_paths : arrays + Simulate a toy Pareto-share process by interpolating boundary drifts. 
""" rng = np.random.default_rng(seed) n_steps = int(T / dt) t_grid = np.linspace(0, T, n_steps + 1) - vol_theta = omega1 - omega2 - - # Compute boundary drifts for interpolation - drift_at_0 = survival_drift(omega1, omega2, gamma, rho, sigma_y) - drift_at_1 = -survival_drift(omega2, omega1, gamma, rho, sigma_y) + volatility = ω_1 - ω_2 + m_0 = boundary_drift(ω_1, ω_2, γ, ρ, σ_y) + m_1 = -boundary_drift(ω_2, ω_1, γ, ρ, σ_y) - theta = np.zeros((n_paths, n_steps + 1)) - dW = rng.normal(0, np.sqrt(dt), (n_paths, n_steps)) + θ = np.zeros((n_paths, n_steps + 1)) + dW = rng.normal(0.0, np.sqrt(dt), size=(n_paths, n_steps)) for t in range(n_steps): - v = 1.0 / (1.0 + np.exp(-theta[:, t])) - # Interpolate drift between boundaries - # Simple linear interpolation - drift = drift_at_0 * (1 - v) + drift_at_1 * v - theta[:, t+1] = (theta[:, t] - + drift * dt - + vol_theta * dW[:, t]) - - v_paths = 1.0 / (1.0 + np.exp(-theta)) - return t_grid, v_paths - - -# Parameters for coexistence region -omega1 = 0.25 -omega2 = 0.0 -gamma = 10.0 -rho = 0.67 # IES = 1.5 -sigma_y = 0.02 -beta = 0.05 - -T = 500 -dt = 0.05 - -t_grid, v_paths = simulate_pareto_share( - omega1, omega2, gamma, rho, sigma_y, beta, T, dt, - n_paths=50, seed=42 + υ = 1.0 / (1.0 + np.exp(-θ[:, t])) + drift = m_0 * (1 - υ) + m_1 * υ + θ[:, t + 1] = θ[:, t] + drift * dt + volatility * dW[:, t] + + υ_paths = 1.0 / (1.0 + np.exp(-θ)) + return t_grid, υ_paths + + +ω_1 = 0.25 +ω_2 = 0.0 +γ = 10.0 +ρ = 0.67 +σ_y = 0.02 + +t_grid, υ_paths = simulate_pareto_share_toy( + ω_1, ω_2, γ, ρ, σ_y, T=500, dt=0.05, n_paths=50, seed=42 ) -fig, axes = plt.subplots(1, 2, figsize=(16, 6)) +fig, axes = plt.subplots(1, 2, figsize=(14, 5)) -# Sample paths ax = axes[0] for i in range(20): - ax.plot(t_grid, v_paths[i], alpha=0.3, linewidth=0.5) -ax.axhline(0.5, color='gray', linestyle=':', alpha=0.5) -ax.set_xlabel('Time $t$', fontsize=12) -ax.set_ylabel(r'Pareto share $\upsilon_t$', fontsize=12) -ax.set_title('Sample paths of Pareto share\n' - 
r'($\gamma=10$, IES$=1.5$, $\omega^1=0.25$, ' - r'$\omega^2=0$)', - fontsize=12) + ax.plot(t_grid, υ_paths[i], color="C0", alpha=0.25, lw=1) +ax.axhline(0.5, color="gray", linestyle=":", lw=1) +ax.set_title("sample paths", fontsize=12) +ax.set_xlabel("time") +ax.set_ylabel(r"Pareto share $\upsilon_t$") ax.set_ylim(0, 1) -# Histogram of final values (approximate stationary distribution) ax = axes[1] -# Use last half of a very long simulation -t_grid_long, v_long = simulate_pareto_share( - omega1, omega2, gamma, rho, sigma_y, beta, - T=2000, dt=0.05, n_paths=5, seed=123 +_, υ_long = simulate_pareto_share_toy( + ω_1, ω_2, γ, ρ, σ_y, T=2000, dt=0.05, n_paths=5, seed=123 ) -# Pool observations from second half -v_stationary = v_long[:, v_long.shape[1]//2:].flatten() -ax.hist(v_stationary, bins=80, density=True, alpha=0.7, - color='steelblue', edgecolor='white') -ax.set_xlabel(r'Pareto share $\upsilon$', fontsize=12) -ax.set_ylabel('Density', fontsize=12) -ax.set_title('Approximate stationary distribution', fontsize=12) +υ_stationary = υ_long[:, υ_long.shape[1] // 2:].ravel() +ax.hist(υ_stationary, bins=80, density=True, color="steelblue", + edgecolor="white", alpha=0.7) +ax.set_title("approximate stationary density", fontsize=12) +ax.set_xlabel(r"Pareto share $\upsilon$") +ax.set_ylabel("density") ax.set_xlim(0, 1) plt.tight_layout() plt.show() ``` -When both agents survive, the Pareto share fluctuates persistently across the full -interval $(0, 1)$. -This means the incorrect agent periodically commands a substantial share of -aggregate consumption. +The left panel shows 20 sample paths of the Pareto share $\upsilon_t$ under parameters inside the coexistence region ($\omega^1 = 0.25$, $\omega^2 = 0$, $\gamma = 10$, IES $\approx 1.49$). -## Summary +Unlike the separable case in {numref}`fig-crra-pareto-paths`, the paths do not drift to zero — they repeatedly visit a wide range of values, bouncing between the two repelling boundaries. 
-{cite}`Borovicka2020` overturns the classical market selection hypothesis by showing -that under recursive preferences of the Epstein-Zin type, agents with incorrect -beliefs can survive --- and even thrive --- in the long run. +The right panel approximates the stationary density by pooling the second half of longer simulations. -The key findings are: +The interior mode confirms that neither agent is driven to extinction and the economy sustains a nondegenerate long-run distribution of wealth shares. -1. **Three channels** determine survival: the risk premium channel, the speculative - volatility channel, and the saving channel. Only the first two operate under - separable CRRA preferences. +## Summary -2. **IES matters**: When IES $> 1$, the saving channel helps agents with distorted beliefs - outsave extinction. When IES $< 1$, it works against them. +Recursive preferences weaken the classical market-selection result. -3. **Coexistence is generic**: For empirically relevant parameter values - ($\gamma > \rho$, IES $> 1$), nondegenerate stationary wealth distributions exist. +The portfolio return channel still rewards more optimistic beliefs. -4. **Optimism vs. pessimism**: Optimistic agents benefit from both the risk premium and - saving channels. Pessimistic agents can survive only through aggressive shorting - combined with high IES. +The volatility channel still penalizes aggressive positions. -5. **Price impact**: A surviving agent with currently negligible wealth has no impact on - current prices, but will affect prices in the future when her wealth share recovers. +But when IES $> 1$, the saving channel can be strong enough to keep a distorted-belief agent alive. -These results have important implications for asset pricing. 
-Models that feature agents with heterogeneous beliefs and recursive preferences can -generate persistent heterogeneity and endogenous fluctuations in the wealth -distribution, enriching the dynamics of equilibrium asset prices, risk premia, -and trading volume. +This is why recursive-preference economies can support stationary long-run wealth distributions with persistent heterogeneity in beliefs and portfolio positions. From 51e29fb2f9278bca99640d1ef6d6a6ba1667f4b9 Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Thu, 2 Apr 2026 21:39:28 +1100 Subject: [PATCH 11/20] update --- lectures/survival_recursive_preferences.md | 106 ++++----------------- 1 file changed, 21 insertions(+), 85 deletions(-) diff --git a/lectures/survival_recursive_preferences.md b/lectures/survival_recursive_preferences.md index 5c1ce6b32..df250e243 100644 --- a/lectures/survival_recursive_preferences.md +++ b/lectures/survival_recursive_preferences.md @@ -135,7 +135,7 @@ When $\gamma = \rho$, preferences reduce to the standard separable CRRA case. ## Planner's problem -Following {cite}`Dumas_Uppal_Wang2000`, we study equilibrium allocations through a social planner's problem. +Following {cite:t}`Dumas_Uppal_Wang2000`, we study equilibrium allocations through a social planner's problem. The planner chooses consumption shares $z^1$ and $z^2 = 1 - z^1$ and discount-rate processes $\nu^n$ for the two agents. @@ -447,7 +447,9 @@ $$ (\pi^1 - \pi^2)(\mu_R - r) = \frac{\omega^1 - \omega^2}{\gamma \sigma_Y} \cdot (\gamma \sigma_Y^2 - \omega^2 \sigma_Y) - \frac{\omega^1 - \omega^2}{2\gamma}. $$ -For the volatility term, write $(\pi^1)^2 - (\pi^2)^2 = (\pi^1 - \pi^2)(\pi^1 + \pi^2)$ and note $\pi^1 + \pi^2 = 2 + (\omega^1 + \omega^2 - 2\omega^2)/(\gamma \sigma_Y)$. After simplification: +For the volatility term, write $(\pi^1)^2 - (\pi^2)^2 = (\pi^1 - \pi^2)(\pi^1 + \pi^2)$ and note $\pi^1 + \pi^2 = 2 + (\omega^1 + \omega^2 - 2\omega^2)/(\gamma \sigma_Y)$. 
+ +After simplification: $$ \tfrac{1}{2}[(\pi^1)^2 - (\pi^2)^2]\sigma_Y^2 = \frac{\omega^1 - \omega^2}{\gamma}\left(\sigma_Y + \frac{\omega^1 - \omega^2}{2\gamma}\right). @@ -469,10 +471,10 @@ $$ (eq:consumption_rates) The term in brackets is the difference in *subjective* expected portfolio returns — what agent 1 believes she earns relative to agent 2. -The prefactor $(1-\rho)/\rho$ translates this perceived return advantage into a saving response. +The factor $(1-\rho)/\rho$ translates this perceived return advantage into a saving response. -- When IES $> 1$ ($\rho < 1$), the prefactor is positive: a higher perceived return makes the agent save more, because the substitution effect dominates the income effect. -- When IES $< 1$ ($\rho > 1$), the prefactor is negative: the income effect dominates and the agent saves less, working against survival. +- When IES $> 1$ ($\rho < 1$), the factor is positive: a higher perceived return makes the agent save more, because the substitution effect dominates the income effect. +- When IES $< 1$ ($\rho > 1$), the factor is negative: the income effect dominates and the agent saves less, working against survival. - When IES $= 1$ ($\rho = 1$), the two effects cancel and the saving channel vanishes entirely. This is the channel through which recursive preferences alter survival outcomes by separating $\gamma$ from $\rho$. @@ -482,7 +484,9 @@ This is the channel through which recursive preferences alter survival outcomes Derive {eq}`eq:consumption_rates`. -In the homogeneous economy populated by agent 2, the consumption-wealth ratio is $(y(0))^{-1} = \beta - (1-\rho)\mu_V^2$, where $\mu_V^2$ is agent 2's expected log return on wealth. Agent 1, as a negligible price-taker, has consumption-wealth ratio $(y^1)^{-1} = \beta - (1-\rho)\mu_V^1$, where $\mu_V^1$ is her own expected log return. 
+In the homogeneous economy populated by agent 2, the consumption-wealth ratio is $(y(0))^{-1} = \beta - (1-\rho)\mu_V^2$, where $\mu_V^2$ is agent 2's expected log return on wealth. + +Agent 1, as a negligible price-taker, has consumption-wealth ratio $(y^1)^{-1} = \beta - (1-\rho)\mu_V^1$, where $\mu_V^1$ is her own expected log return. Use $(y^2)^{-1} - (y^1)^{-1} = (1-\rho)(\mu_V^1 - \mu_V^2)$ and express $\mu_V^1 - \mu_V^2$ in terms of agent 1's *subjective* expected excess return. @@ -530,22 +534,6 @@ The survival conditions do not depend on $\beta$ or $\mu_Y$, which affect the le def portfolio_return_diff(ω_1, ω_2, γ, σ_y): """ Difference in expected log portfolio returns at the boundary. - - Parameters - ---------- - ω_1 : float - Belief distortion of agent 1 - ω_2 : float - Belief distortion of agent 2 - γ : float - Risk aversion - σ_y : float - Endowment volatility - - Returns - ------- - float - Difference in expected log portfolio returns """ Δω = ω_1 - ω_2 risky_share_diff = Δω / (γ * σ_y) @@ -557,21 +545,6 @@ def portfolio_return_diff(ω_1, ω_2, γ, σ_y): def saving_channel(ω_1, ω_2, γ, ρ, σ_y): """ Difference in consumption-wealth ratios at the boundary. - - Parameters - ---------- - ω_1, ω_2 : float - Belief distortions - γ : float - Risk aversion - ρ : float - Inverse of IES - σ_y : float - Endowment volatility - - Returns - ------- - float """ Δω = ω_1 - ω_2 subjective_return_diff = Δω * σ_y + Δω**2 / (2 * γ) @@ -583,22 +556,6 @@ def boundary_drift(ω_1, ω_2, γ, ρ, σ_y): Boundary drift m_ϑ when agent 1 becomes negligible. Positive drift means agent 1 survives (repelling boundary). 
- - Parameters - ---------- - ω_1, ω_2 : float - Belief distortions of agents 1 and 2 - γ : float - Risk aversion - ρ : float - Inverse of IES - σ_y : float - Endowment volatility - - Returns - ------- - float - Drift at v = 0 """ return γ * ( portfolio_return_diff(ω_1, ω_2, γ, σ_y) @@ -608,7 +565,7 @@ def boundary_drift(ω_1, ω_2, γ, ρ, σ_y): ## Survival regions -A central contribution of {cite}`Borovicka2020` is the characterization of survival regions in the $(\gamma, \rho)$ plane. +A central contribution of {cite:t}`Borovicka2020` is the characterization of survival regions in the $(\gamma, \rho)$ plane. Under separable preferences, $\gamma = \rho$, the agent with more accurate beliefs always dominates. @@ -625,11 +582,6 @@ def compute_survival_boundary(ω_1, ω_2, σ_y, γ_grid, boundary="lower"): For boundary='lower', agent 1 is the small agent. For boundary='upper', agent 2 is the small agent. - - Returns - ------- - np.ndarray - Boundary values of ρ as a function of γ """ ρ_boundary = [] @@ -730,8 +682,8 @@ for idx, (case, value, label) in enumerate(panel_specs): ax.set_title(label, fontsize=12) ax.set_xlim(0, 6) ax.set_ylim(0, 2) - ax.set_xlabel(r"risk aversion $\gamma$") - ax.set_ylabel(r"inverse IES $\rho$") + ax.set_xlabel(r"$\gamma$") + ax.set_ylabel(r"$\rho$") axes[0, 0].legend(loc="upper left", fontsize=9) plt.tight_layout() @@ -745,11 +697,11 @@ Each panel plots two curves in the $(\gamma, \rho)$ plane for a different value - The shaded region between the two curves is where both agents survive. - The dotted diagonal $\gamma = \rho$ is the separable CRRA case, along which the agent with more accurate beliefs always dominates. -Moderate optimism ($\omega^1 = 0.10$) produces a narrow coexistence region. +Moderate optimism ($\omega^1 = 0.10$) produces a wide coexistence region that extends across most of the $\gamma$ range. -Stronger optimism ($\omega^1 = 0.20$) widens it substantially. 
+Stronger optimism ($\omega^1 = 0.20$) narrows the region: the agent 2 boundary shifts out of the plotted range for moderate and large $\gamma$, shrinking the set of $(\gamma, \rho)$ pairs where both agents coexist. -In the limit $|\omega^1|/\sigma_Y \to \infty$ (bottom-left), the boundaries simplify to closed-form expressions. +In the limit $|\omega^1|/\sigma_Y \to \infty$ (bottom-left), the boundaries simplify to closed-form expressions and the coexistence region contracts further, existing only for $\gamma < 1$. Pessimistic distortions ($\omega^1 = -0.25$, bottom-right) can also survive, but only in a much narrower part of the parameter space. @@ -892,11 +844,11 @@ This figure plots the two boundary drifts as a function of $\gamma$ ($\omega^1 = The figure illustrates asymptotic result 1. -For small $\gamma$, the blue curve is negative and the red curve is also negative. +For small $\gamma$, the blue curve is negative and the red curve is positive. -Both drifts push the Pareto share in the same direction — toward $\upsilon = 1$ — so whichever agent happens to get ahead early will dominate. +Both boundaries are attracting: near $\upsilon = 0$ the negative drift pulls $\upsilon$ toward 0, and near $\upsilon = 1$ the positive drift pushes $\upsilon$ toward 1. -This is outcome (d) in {prf:ref}`survival_conditions`: neither boundary is repelling, so each agent dominates with strictly positive probability depending on the realized Brownian path. +This is outcome (d) in {prf:ref}`survival_conditions`: neither boundary is repelling, so whichever agent happens to get ahead early will dominate, with each agent having strictly positive probability of dominance depending on the realized Brownian path. As $\gamma$ increases past roughly 1, the blue curve crosses zero and becomes positive while the red curve stays negative. 
@@ -969,22 +921,6 @@ Agent 2 has correct beliefs, so the log-odds drift is negative and all paths tre Agent 1 is driven to extinction — the classical market-selection result of {cite:t}`Blume_Easley2006`. -## Constant aggregate endowment - -Section IV.D.3 studies the limiting case in which aggregate endowment is constant, so $\mu_Y = \sigma_Y = 0$. - -In the notation of the paper, this case is equivalent to the limit $|\omega^1| / \sigma_Y \to \infty$ studied in the bottom-left panel of figure 2. - -The point of the exercise is that the survival results do not rely on unbounded aggregate endowment. - -Even with deterministic aggregate consumption, agents can trade for purely speculative motives in complete markets. - -As the negligible agent faces prices generated by the large agent, she can choose a speculative portfolio with a high *subjective* expected return. - -When IES is above one, that high perceived return raises her saving rate and can allow her to outsave extinction. - -This is the pure saving channel in isolation. - ## Asset pricing implications As one agent becomes negligible, current prices converge to those of the homogeneous economy populated by the large agent. @@ -1169,7 +1105,7 @@ def simulate_pareto_share_toy(ω_1, ω_2, γ, ρ, σ_y, T, dt, n_paths=20, seed= ω_1 = 0.25 ω_2 = 0.0 -γ = 10.0 +γ = 5.0 ρ = 0.67 σ_y = 0.02 @@ -1204,7 +1140,7 @@ plt.tight_layout() plt.show() ``` -The left panel shows 20 sample paths of the Pareto share $\upsilon_t$ under parameters inside the coexistence region ($\omega^1 = 0.25$, $\omega^2 = 0$, $\gamma = 10$, IES $\approx 1.49$). +The left panel shows 20 sample paths of the Pareto share $\upsilon_t$ under parameters inside the coexistence region ($\omega^1 = 0.25$, $\omega^2 = 0$, $\gamma = 5$, IES $\approx 1.49$). Unlike the separable case in {numref}`fig-crra-pareto-paths`, the paths do not drift to zero — they repeatedly visit a wide range of values, bouncing between the two repelling boundaries. 
From af32f43797d1002b8241332859dbc9749d82f0a1 Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Thu, 2 Apr 2026 23:44:12 +1100 Subject: [PATCH 12/20] updates --- lectures/survival_recursive_preferences.md | 167 ++++++++++++++++----- 1 file changed, 127 insertions(+), 40 deletions(-) diff --git a/lectures/survival_recursive_preferences.md b/lectures/survival_recursive_preferences.md index df250e243..51b00a749 100644 --- a/lectures/survival_recursive_preferences.md +++ b/lectures/survival_recursive_preferences.md @@ -274,17 +274,56 @@ where $m$ and $s$ are the drift and diffusion of the Pareto share. This is the continuous-time analogue of $\beta \, \mathbb{E}[\tilde{J}(\upsilon')] - \tilde{J}(\upsilon)$: it measures how the value function drifts and fluctuates as $\upsilon$ evolves. -Setting flow payoff plus expected capital gain equal to zero gives the HJB equation: +Setting flow payoff plus expected capital gain equal to zero gives the schematic HJB equation: $$ 0 = \sup_{(z^1,z^2,\nu^1,\nu^2)} \left\{ \upsilon F(z^1, \nu^1) + (1-\upsilon) F(z^2, \nu^2) + \mathcal{L} \tilde{J}(\upsilon) \right\} +$$ (eq:hjb_sketch) + +subject to $z^1 + z^2 \leq 1$. 
+ +#### Exact reduced ODE + +Proposition 2.3 of {cite:t}`Borovicka2020` gives the exact HJB equation after substituting the homogeneity reduction $J(\tilde{\lambda}, Y) = (\tilde{\lambda}^1 + \tilde{\lambda}^2) Y^{1-\gamma} \tilde{J}(\upsilon)$ and the dynamics of $\upsilon$ and $Y$: + +$$ +0 = \sup_{(z^1, z^2, \nu^1, \nu^2)} \; +\upsilon \, F(z^1, \nu^1) + (1 - \upsilon) \, F(z^2, \nu^2) $$ (eq:hjb) +$$ ++ \left[ +-\upsilon \nu^1 - (1-\upsilon)\nu^2 ++ \bigl(\upsilon \omega^1 + (1-\upsilon)\omega^2\bigr)(1-\gamma)\sigma_Y ++ (1-\gamma)\mu_Y ++ \tfrac{1}{2}(1-\gamma)^2 \sigma_Y^2 +\right] \tilde{J}(\upsilon) +$$ + +$$ ++ \upsilon(1-\upsilon) +\left[\nu^2 - \nu^1 + (\omega^1 - \omega^2)(1-\gamma)\sigma_Y\right] +\tilde{J}'(\upsilon) +$$ + +$$ ++ \tfrac{1}{2}\upsilon^2(1-\upsilon)^2 (\omega^1 - \omega^2)^2 \, +\tilde{J}''(\upsilon) +$$ + subject to $z^1 + z^2 \leq 1$. -This is the continuous-time counterpart of the discrete-time planner's problem in {cite:t}`Blume_Easley2006` (see also {doc}`likelihood_ratio_process_2`). +The first line is the flow payoff from the two agents' felicity functions. + +The second line multiplies $\tilde{J}(\upsilon)$ by a term that combines the agents' discount rates, belief-weighted endowment drift, and a variance correction — these arise from absorbing the $Y^{1-\gamma}$ factor via Itô's lemma. -The boundary conditions match the continuation values of the corresponding homogeneous economies. +The third line multiplies $\tilde{J}'(\upsilon)$ by the drift of the Pareto share, which depends on the difference in discount rates and the belief-weighted response to endowment risk. + +The fourth line multiplies $\tilde{J}''(\upsilon)$ by the squared diffusion of the Pareto share. + +The boundary conditions are $\tilde{J}(0) = \tilde{V}^2$ and $\tilde{J}(1) = \tilde{V}^1$, where $\tilde{V}^n$ is the continuation value in the homogeneous economy populated by agent $n$ alone. 
+ +This is the continuous-time counterpart of the discrete-time planner's problem in {cite:t}`Blume_Easley2006` (see also {doc}`likelihood_ratio_process_2`). ## Survival conditions @@ -422,9 +461,9 @@ Derive {eq}`eq:portfolio_returns`. At the boundary $\upsilon \searrow 0$, agent $n$'s optimal risky-asset share is $\pi^n = 1 + (\omega^n - \omega^2)/(\gamma \sigma_Y)$ (see {eq}`eq:portfolio`). -The expected log return on the risky asset under $P$ is $\mu_R = \mu_Y + \gamma \sigma_Y^2 - \omega^2 \sigma_Y - \frac{1}{2}\sigma_Y^2$, and the risk-free rate is $r$. +Let $\bar{\mu}_R = \mu_Y + \gamma \sigma_Y^2 - \omega^2 \sigma_Y$ denote the expected return on the risky asset under $P$, and $r$ the risk-free rate. - The expected log portfolio return is $m_R^n = r + \pi^n(\mu_R - r) - \frac{1}{2}(\pi^n)^2 \sigma_Y^2$. +The continuously rebalanced portfolio has expected log return $m_R^n = r + \pi^n(\bar{\mu}_R - r) - \frac{1}{2}(\pi^n)^2 \sigma_Y^2$. Compute $m_R^1 - m_R^2$ and simplify. ``` @@ -433,18 +472,18 @@ Compute $m_R^1 - m_R^2$ and simplify. :class: dropdown ``` -Using $m_R^n = r + \pi^n(\mu_R - r) - \frac{1}{2}(\pi^n)^2 \sigma_Y^2$, the difference is +Using $m_R^n = r + \pi^n(\bar{\mu}_R - r) - \frac{1}{2}(\pi^n)^2 \sigma_Y^2$, the difference is $$ -m_R^1 - m_R^2 = (\pi^1 - \pi^2)(\mu_R - r) - \tfrac{1}{2}[(\pi^1)^2 - (\pi^2)^2]\sigma_Y^2. +m_R^1 - m_R^2 = (\pi^1 - \pi^2)(\bar{\mu}_R - r) - \tfrac{1}{2}[(\pi^1)^2 - (\pi^2)^2]\sigma_Y^2. $$ The difference in risky shares is $\pi^1 - \pi^2 = (\omega^1 - \omega^2)/(\gamma \sigma_Y)$. -The equity premium (log) is $\mu_R - r = \gamma \sigma_Y^2 - \omega^2 \sigma_Y - \frac{1}{2}\sigma_Y^2$, but for the first term we only need the product: +The arithmetic equity premium is $\bar{\mu}_R - r = \gamma \sigma_Y^2 - \omega^2 \sigma_Y$, so: $$ -(\pi^1 - \pi^2)(\mu_R - r) = \frac{\omega^1 - \omega^2}{\gamma \sigma_Y} \cdot (\gamma \sigma_Y^2 - \omega^2 \sigma_Y) - \frac{\omega^1 - \omega^2}{2\gamma}. 
+(\pi^1 - \pi^2)(\bar{\mu}_R - r) = \frac{\omega^1 - \omega^2}{\gamma \sigma_Y} \cdot (\gamma \sigma_Y^2 - \omega^2 \sigma_Y). $$ For the volatility term, write $(\pi^1)^2 - (\pi^2)^2 = (\pi^1 - \pi^2)(\pi^1 + \pi^2)$ and note $\pi^1 + \pi^2 = 2 + (\omega^1 + \omega^2 - 2\omega^2)/(\gamma \sigma_Y)$. @@ -627,14 +666,16 @@ def compute_limit_boundary(γ_grid, boundary="lower"): ```{code-cell} ipython3 --- +tags: [hide-input] mystnb: figure: caption: Survival regions corresponding to Figure 2 in Borovicka (2020) name: fig-survival-regions --- σ_y = 0.02 -γ_grid = np.linspace(0.01, 6.0, 500) -ρ_max = 2.0 +γ_vals = np.linspace(0.01, 6.0, 500) +ρ_vals = np.linspace(0.01, 2.0, 400) +G, R = np.meshgrid(γ_vals, ρ_vals) panel_specs = [ ("finite", 0.10, r"$\omega^1 = 0.10$"), @@ -649,36 +690,75 @@ for idx, (case, value, label) in enumerate(panel_specs): ax = axes.flat[idx] if case == "limit": - ρ_1 = compute_limit_boundary(γ_grid, boundary="lower") - ρ_2 = compute_limit_boundary(γ_grid, boundary="upper") + ρ_1 = compute_limit_boundary(γ_vals, boundary="lower") + ρ_2 = compute_limit_boundary(γ_vals, boundary="upper") + # Limit boundary drifts: use closed-form expressions + # m0 > 0 (agent 1 survives) when ρ < γ/(1+γ) + m0 = G - (1 + G) * R + # m1 < 0 (agent 2 survives) when ρ < γ/(1-γ) for γ<1, always for γ>=1 + m1 = (1 - G) * R - G else: - ρ_1 = compute_survival_boundary(value, 0.0, σ_y, γ_grid, boundary="lower") - ρ_2 = compute_survival_boundary(value, 0.0, σ_y, γ_grid, boundary="upper") + ρ_1 = compute_survival_boundary(value, 0.0, σ_y, γ_vals, + boundary="lower") + ρ_2 = compute_survival_boundary(value, 0.0, σ_y, γ_vals, + boundary="upper") + # Evaluate boundary drifts on the grid + m0 = boundary_drift(value, 0.0, G, R, σ_y) + m1 = -boundary_drift(0.0, value, G, R, σ_y) + + # Classify all four regions + both = (m0 > 0) & (m1 < 0) + ag1_dom = (m0 > 0) & (m1 > 0) + ag2_dom = (m0 < 0) & (m1 < 0) + either = (m0 < 0) & (m1 > 0) + + # Shade coexistence region + ax.contourf(G, R, 
both.astype(float), levels=[0.5, 1.5], + colors=["C2"], alpha=0.18) + if idx == 0: + ax.fill_between([], [], color="C2", alpha=0.18, + label="both survive") + + # Plot boundary curves + ax.contour(G, R, m0, levels=[0], colors=["C0"], + linestyles="--", linewidths=2) + ax.contour(G, R, m1, levels=[0], colors=["C3"], + linestyles="-", linewidths=2) + if idx == 0: + ax.plot([], [], "--", color="C0", lw=2, label="agent 1 boundary") + ax.plot([], [], "-", color="C3", lw=2, label="agent 2 boundary") - ρ_1 = np.where((ρ_1 > 0) & (ρ_1 < ρ_max), ρ_1, np.nan) - ρ_2 = np.where((ρ_2 > 0) & (ρ_2 < ρ_max), ρ_2, np.nan) - - lower = np.minimum(ρ_1, ρ_2) - upper = np.maximum(ρ_1, ρ_2) - mask = np.isfinite(lower) & np.isfinite(upper) - - ax.fill_between( - γ_grid, lower, upper, where=mask, color="C2", alpha=0.18, - label="both survive" if idx == 0 else None - ) - ax.plot( - γ_grid, ρ_1, "--", color="C0", lw=2, - label="agent 1 boundary" if idx == 0 else None - ) - ax.plot( - γ_grid, ρ_2, "-", color="C3", lw=2, - label="agent 2 boundary" if idx == 0 else None - ) ax.plot( - γ_grid, γ_grid, ":", color="black", lw=2, + γ_vals, γ_vals, ":", color="black", lw=2, label=r"$\gamma = \rho$" if idx == 0 else None ) + tkw = dict(ha="center", va="center", style="italic", color="0.15") + if case == "finite" and value == 0.10: + ax.text(0.31, 1.05, "either agent dominates", rotation=90, + fontsize=10, **tkw) + ax.text(1.8, 1.55, "agent 2\ndominates", fontsize=11, **tkw) + ax.text(3.5, 0.75, "both\nsurvive", fontsize=11, **tkw) + if ag1_dom.any(): + ax.text(5.0, 0.25, "agent 1\ndominates", fontsize=11, **tkw) + elif case == "finite" and value == 0.20: + ax.text(0.31, 1.05, "either agent dominates", rotation=90, + fontsize=10, **tkw) + ax.text(2.5, 1.55, "agent 2\ndominates", fontsize=11, **tkw) + ax.text(3.8, 0.55, "both\nsurvive", fontsize=11, **tkw) + if ag1_dom.any(): + ax.text(5.2, 0.08, "agent 1\ndominates", fontsize=9, **tkw) + elif case == "limit": + ax.text(0.31, 1.05, "either agent 
dominates", rotation=90, + fontsize=10, **tkw) + ax.text(3.0, 1.40, "agent 2\ndominates", fontsize=11, **tkw) + ax.text(3.5, 0.30, "both\nsurvive", fontsize=11, **tkw) + elif case == "finite" and value == -0.25: + ax.text(0.31, 1.05, "either agent dominates", rotation=90, + fontsize=10, **tkw) + ax.text(3.5, 1.20, "agent 2\ndominates", fontsize=11, **tkw) + ax.text(2.5, 0.18, "both\nsurvive", fontsize=11, **tkw) + ax.set_title(label, fontsize=12) ax.set_xlim(0, 6) ax.set_ylim(0, 2) @@ -701,7 +781,9 @@ Moderate optimism ($\omega^1 = 0.10$) produces a wide coexistence region that ex Stronger optimism ($\omega^1 = 0.20$) narrows the region: the agent 2 boundary shifts out of the plotted range for moderate and large $\gamma$, shrinking the set of $(\gamma, \rho)$ pairs where both agents coexist. -In the limit $|\omega^1|/\sigma_Y \to \infty$ (bottom-left), the boundaries simplify to closed-form expressions and the coexistence region contracts further, existing only for $\gamma < 1$. +In the limit $|\omega^1|/\sigma_Y \to \infty$ (bottom-left), the boundaries simplify to closed-form expressions. + +The coexistence region narrows but extends to large $\gamma$ values below the agent 2 boundary curve. Pessimistic distortions ($\omega^1 = -0.25$, bottom-right) can also survive, but only in a much narrower part of the parameter space. @@ -726,7 +808,7 @@ def decompose_survival(ω_1, ω_2, γ_grid, ρ, σ_y): ω_2 = 0.0 ρ = 0.67 σ_y = 0.02 -γ_grid = np.linspace(0.5, 25.0, 300) +γ_grid = np.linspace(0.5, 15.0, 300) risk_term, vol_term, save_term, total = decompose_survival( ω_1, ω_2, γ_grid, ρ, σ_y @@ -796,7 +878,9 @@ plt.show() Each panel shows the same three-term decomposition as the previous figure, but now for three different values of the IES ($\omega^1 = 0.25$, $\omega^2 = 0$, $\sigma_Y = 0.02$). - Left panel (IES $= 0.5$): the saving term is negative, so the optimistic agent actually saves less, working against survival. 
-- Center panel (IES $= 1.0$): the saving term vanishes entirely, reproducing the separable benchmark. +- Center panel (IES $= 1.0$): the saving term vanishes entirely, so only the portfolio return and volatility channels remain. + + - This eliminates the saving channel but does not by itself reproduce the full separable CRRA benchmark, which requires $\gamma = \rho$ (i.e., IES $= 1/\gamma$), not merely $\rho = 1$. - Right panel (IES $= 1.5$): the saving term is positive and shifts the total drift upward, expanding the range of $\gamma$ values for which the optimistic agent survives. ## Asymptotic results @@ -806,6 +890,7 @@ Borovicka derives several useful asymptotic results. 1. As $\gamma \searrow 0$, each agent dominates with strictly positive probability. 1. As $\gamma \nearrow \infty$, the relatively more optimistic agent dominates. 1. As $\rho \searrow 0$, the relatively more optimistic agent always survives. + - The relatively more pessimistic agent can also survive when risk aversion is sufficiently low. 1. As $\rho \nearrow \infty$, a nondegenerate long-run equilibrium cannot exist. The next figure illustrates the first result by plotting both boundary drifts as $\gamma$ becomes small. @@ -925,7 +1010,7 @@ Agent 1 is driven to extinction — the classical market-selection result of {ci As one agent becomes negligible, current prices converge to those of the homogeneous economy populated by the large agent. -When agent 2 is the large agent, proposition 5.1 implies +When agent 2 is the large agent, Proposition 5.1 in {cite:t}`Borovicka2020` implies $$ \lim_{\upsilon \searrow 0} r(\upsilon) @@ -1146,7 +1231,9 @@ Unlike the separable case in {numref}`fig-crra-pareto-paths`, the paths do not d The right panel approximates the stationary density by pooling the second half of longer simulations. -The interior mode confirms that neither agent is driven to extinction and the economy sustains a nondegenerate long-run distribution of wealth shares. 
+The interior mode is consistent with neither agent being driven to extinction. + +However, this toy interpolation only illustrates the recurrence logic; it does not reproduce the quantitative stationary consumption-share density in Figure 4 of {cite:t}`Borovicka2020`, which requires solving the full interior equilibrium ODE. ## Summary From 18071eb2678e282b2e955037f45b1bac72c6059b Mon Sep 17 00:00:00 2001 From: thomassargent30 Date: Fri, 3 Apr 2026 12:52:57 -0400 Subject: [PATCH 13/20] Tom's April 3 edits of blackwell_kihlstrom.md lecture --- lectures/_static/quant-econ.bib | 44 +++++ lectures/blackwell_kihlstrom.md | 291 +++++++++++++++++++++++++++-- lectures/enet_binomial.csv | 200 ++++++++++++++++++++ lectures/enet_poisson.csv | 200 ++++++++++++++++++++ lectures/gee_simulation_check.txt | 0 lectures/lasso_data.csv | 300 ++++++++++++++++++++++++++++++ lectures/y_arma_data.csv | 251 +++++++++++++++++++++++++ 7 files changed, 1273 insertions(+), 13 deletions(-) create mode 100644 lectures/enet_binomial.csv create mode 100644 lectures/enet_poisson.csv create mode 100644 lectures/gee_simulation_check.txt create mode 100644 lectures/lasso_data.csv create mode 100644 lectures/y_arma_data.csv diff --git a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index 3c469fff9..7ce311876 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -3859,3 +3859,47 @@ @article{AngPiazzesi2003 number = {4}, pages = {745--787} } + +@article{csiszar1963, + author = {Csisz{\'a}r, Imre}, + title = {{Eine informationstheoretische Ungleichung und ihre Anwendung auf den Beweis der Ergodizit{\"a}t von Markoffschen Ketten}}, + journal = {Magyar Tud. Akad. Mat. Kutat{\'o} Int. K{\"o}zl.}, + year = 1963, + volume = {8}, + pages = {85--108} +} + +@article{ali1966, + author = {Ali, S. M. and Silvey, S. 
D.}, + title = {{A general class of coefficients of divergence of one distribution from another}}, + journal = {Journal of the Royal Statistical Society, Series B}, + year = 1966, + volume = {28}, + number = {1}, + pages = {131--142} +} + +@book{chentsov1981, + author = {{\v{C}}encov, Nikolai N.}, + title = {{Statistical Decision Rules and Optimal Inference}}, + series = {Translations of Mathematical Monographs}, + volume = {53}, + publisher = {American Mathematical Society}, + address = {Providence, RI}, + year = 1981 +} + +@inproceedings{tishby_pereira_bialek1999, + author = {Tishby, Naftali and Pereira, Fernando C. and Bialek, William}, + title = {{The Information Bottleneck Method}}, + booktitle = {Proceedings of the 37th Annual Allerton Conference on Communication, Control, and Computing}, + year = 1999, + pages = {368--377} +} + +@article{shwartz_ziv_tishby2017, + author = {Shwartz-Ziv, Ravid and Tishby, Naftali}, + title = {{Opening the Black Box of Deep Neural Networks via Information}}, + journal = {arXiv preprint arXiv:1703.00810}, + year = 2017 +} diff --git a/lectures/blackwell_kihlstrom.md b/lectures/blackwell_kihlstrom.md index 49951f1d3..29658374e 100644 --- a/lectures/blackwell_kihlstrom.md +++ b/lectures/blackwell_kihlstrom.md @@ -34,38 +34,37 @@ This lecture studies *Blackwell's theorem* {cite}`blackwell1951,blackwell1953` o Our presentation brings in findings from a Bayesian interpretation of Blackwell's theorem by {cite}`kihlstrom1984`. -Blackwell and Kihlstrom study questions closely related to those encountered in this QuantEcon lecture {doc}`likelihood_bayes`. +Blackwell and Kihlstrom study statistical model-selection questions closely related to those encountered in this QuantEcon lecture {doc}`likelihood_bayes`. 
+In a two-state case $S = \{s_1, s_2\}$, the two conditional densities $f(\cdot) = \mu(\cdot \mid s_1)$ and $g(\cdot) = \mu(\cdot \mid s_2)$ are the ones used repeatedly in our studies of classical hypothesis testing and Bayesian inference in the QuantEcon lecture {doc}`likelihood_bayes` and in several other lectures in this series.
+We'll use the terms "signal" and "experiment" as synonyms.
The lecture proceeds as follows: @@ -124,7 +123,7 @@ print("Row sums ν:", ν.sum(axis=1)) ### Stochastic transformations (Markov kernels) -A **stochastic transformation** $Q$ maps signals of one experiment to signals of another by further randomization. +A **stochastic transformation** $Q$ maps signals from one experiment to signals from another by further randomization. In the discrete setting with $M$ input signals and $K$ output signals, $Q$ is an $M \times K$ Markov matrix: $q_{lk} \geq 0$ and $\sum_k q_{lk} = 1$ for every row $l$. @@ -1178,6 +1177,272 @@ In the two-state case this reduces to the familiar mean-preserving-spread compar DeGroot's contribution is to extend the comparison from particular utility functions to the full class of concave uncertainty functions. +## The Data Processing Inequality and Coarse-Graining + +Blackwell's garbling condition — that $\nu = \mu Q$ for some Markov kernel $Q$ — is the same mathematical operation that underlies the **data processing inequality** (DPI) and the **coarse-graining theorem** in information theory, information geometry, and machine learning. + +### The DPI for f-divergences + +An **f-divergence** between two probability distributions $P$ and $Q$ over a finite space $\Omega$ is + +$$ +D_f(P \| Q) = \sum_{\omega \in \Omega} q_\omega \, f\!\left(\frac{p_\omega}{q_\omega}\right), +$$ + +where $f : (0,\infty) \to \mathbb{R}$ is a convex function with $f(1) = 0$. + +Special cases include: + +| Divergence | Generator $f(t)$ | +|:---|:---| +| KL-divergence | $t \log t$ | +| Squared Hellinger $H^2$ | $(\sqrt{t} - 1)^2 / 2$ | +| Total variation TV | $\lvert t - 1 \rvert / 2$ | +| Chi-squared $\chi^2$ | $(t-1)^2$ | + +The class of f-divergences was introduced independently by {cite}`csiszar1963` and Morimoto (1963); see also {cite}`ali1966`. 
+ +```{admonition} Coarse-Graining Theorem / Data Processing Inequality +:class: important +For any f-divergence $D_f$ and any Markov kernel (stochastic transformation) +$\kappa$ — with $P \kappa$ denoting the image of $P$ under $\kappa$ — we have + +$$ +D_f(P \| Q) \geq D_f(P\kappa \| Q\kappa). +$$ + +Equality holds if and only if $\kappa$ is induced by a sufficient statistic for +the pair $\{P, Q\}$. +``` + +The proof follows from Jensen's inequality applied to the convex function $f$, using the fact that $\kappa$ is a stochastic matrix {cite}`csiszar1963`. + +### Connection to Blackwell's sufficiency condition + +In Blackwell's framework, $\mu$ and $\nu$ are experiments over the same state space $S = \{s_1, \ldots, s_N\}$. + +For two states, each experiment has two rows: $\mu_1 = \mu(s_1, \cdot)$ and $\mu_2 = \mu(s_2, \cdot)$. + +If $\nu = \mu Q$ (i.e., $\nu$ is a garbling of $\mu$), then the pair $(\nu_1, \nu_2) = (\mu_1 Q, \mu_2 Q)$ is obtained by applying the Markov kernel $Q$ to the pair $(\mu_1, \mu_2)$. + +The coarse-graining theorem then implies immediately: + +$$ +D_f(\mu_1 \| \mu_2) \geq D_f(\nu_1 \| \nu_2) +\quad \text{for every f-divergence } D_f, +$$ + +whenever $\mu \geq \nu$ in the Blackwell order. + +So a more informative experiment always produces *more separated* conditional signal distributions, in the sense of every f-divergence simultaneously. + +The DPI is thus a statement about the *distinguishability* of states: garbling an experiment makes the states harder to tell apart under every statistical measure of separability. + +The equality condition links the DPI directly back to Blackwell: $D_f(\mu_1 Q \| \mu_2 Q) = D_f(\mu_1 \| \mu_2)$ for some (hence every) strictly convex $f$ if and only if $Q$ is a sufficient statistic for $(\mu_1, \mu_2)$, i.e., the garbling discards nothing relevant. + +### Information geometry: Chentsov's theorem + +The DPI has an infinitesimal, differential-geometric companion. 
+ +**Chentsov's theorem** {cite}`chentsov1981` states that the **Fisher information matrix** $I_F(\theta)$ is, up to a constant rescaling, the *unique* Riemannian metric on a statistical manifold that contracts under every Markov morphism (coarse-graining): + +$$ +I_F(\theta;\, \mu) \succeq I_F(\theta;\, \mu\kappa) +\quad \text{for every differentiable family } \{\mu_\theta\} \text{ and every Markov kernel } \kappa. +$$ + +Equality holds if and only if $\kappa$ is a sufficient statistic for $\theta$. + +The uniqueness clause is deep: it says that the Fisher information is not merely *one* metric that happens to contract under coarse-graining, but the *only one* with that property. + +See Amari and Nagaoka (2000) for a thorough treatment of information geometry and its connections to sufficiency. + +### The information bottleneck in machine learning + +The **information bottleneck** method of {cite}`tishby_pereira_bialek1999` provides a prominent application of the DPI in machine learning. + +Given a joint distribution $p(X, Y)$ over an input $X$ and a target $Y$, the goal is to find a compressed representation $T$ — formed by a stochastic mapping $p(T \mid X)$ — that retains as much information about $Y$ as possible while using as few bits as possible to describe $X$. + +The method minimizes the Lagrangian + +$$ +\mathcal{L}[p(T \mid X)] = I(X;\, T) - \beta \, I(T;\, Y), +$$ + +where $I(\cdot\,;\,\cdot)$ denotes mutual information and $\beta \geq 0$ governs the compression–relevance trade-off. + +Because $Y - X - T$ forms a Markov chain (T is derived from X alone), the DPI implies + +$$ +I(T;\, Y) \leq I(X;\, Y), +$$ + +with equality if and only if $T$ is a **sufficient statistic** for $Y$ given $X$. 
+ +The Blackwell ordering explains why no deterministic or random post-processing of $X$ can increase the mutual information with $Y$: any Markov kernel applied to $X$ is a garbling in Blackwell's sense, and the DPI is the mutual-information form of the coarse-graining theorem. + +In machine learning language the information bottleneck searches among all garblings of $X$ for the one that best preserves relevant information about $Y$ subject to a compression budget. + +In a deep neural network with input $X$ and target $Y$ and layers $X \to T_1 \to T_2 \to \cdots \to T_L \to \hat{Y}$, each layer's representation is a garbling of the previous one. +The DPI then implies the chain of inequalities + +$$ +I(X;\, Y) \geq I(T_1;\, Y) \geq I(T_2;\, Y) \geq \cdots \geq I(T_L;\, Y), +$$ + +so successive layers can only lose, never gain, information about $Y$. +This observation was placed at the center of the study of what deep networks learn by {cite}`shwartz_ziv_tishby2017`. + +### Demonstrating the coarse-graining theorem numerically + +The following code verifies that applying a progressively more mixing garbling $Q(\alpha)$ — interpolating between the identity matrix ($\alpha = 0$, no garbling) and the fully-mixing uniform kernel ($\alpha = 1$, complete garbling) — decreases *all* f-divergences between the experiment's rows simultaneously. 
+ +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: All f-divergences contract monotonically under progressive garbling + name: fig-blackwell-dpi-fdivergences +--- +def kl_divergence_rows(p, q, eps=1e-12): + """D_KL(p || q) for row vectors p, q.""" + p = np.clip(np.asarray(p, float), eps, 1.0) + q = np.clip(np.asarray(q, float), eps, 1.0) + return float(np.sum(p * np.log(p / q))) + + +def squared_hellinger_rows(p, q, eps=1e-12): + """H^2(p, q) = (1/2) * sum (sqrt(p_i) - sqrt(q_i))^2.""" + p = np.clip(np.asarray(p, float), eps, 1.0) + q = np.clip(np.asarray(q, float), eps, 1.0) + return float(0.5 * np.sum((np.sqrt(p) - np.sqrt(q)) ** 2)) + + +def total_variation_rows(p, q): + """TV(p, q) = (1/2) * sum |p_i - q_i|.""" + return float(0.5 * np.sum(np.abs(np.asarray(p, float) + - np.asarray(q, float)))) + + +def make_mixing_garbling(alpha, M=2): + """ + Garbling that interpolates between the identity (alpha=0) + and the fully-mixing uniform kernel (alpha=1). + """ + return (1.0 - alpha) * np.eye(M) + alpha * np.ones((M, M)) / M + + +# Rows of the binary experiment: distribution of signal given each state +row1 = np.array([0.8, 0.2]) # state s_1 +row2 = np.array([0.2, 0.8]) # state s_2 + +alpha_grid = np.linspace(0, 1, 200) +dpi_results = { + r"KL divergence $D_\mathrm{KL}(\nu_1 \| \nu_2)$": [], + r"Squared Hellinger $H^2(\nu_1, \nu_2)$": [], + r"Total variation $\mathrm{TV}(\nu_1, \nu_2)$": [], +} + +for alpha in alpha_grid: + Q = make_mixing_garbling(alpha) + ν1 = row1 @ Q + ν2 = row2 @ Q + dpi_results[r"KL divergence $D_\mathrm{KL}(\nu_1 \| \nu_2)$"].append( + kl_divergence_rows(ν1, ν2)) + dpi_results[r"Squared Hellinger $H^2(\nu_1, \nu_2)$"].append( + squared_hellinger_rows(ν1, ν2)) + dpi_results[r"Total variation $\mathrm{TV}(\nu_1, \nu_2)$"].append( + total_variation_rows(ν1, ν2)) + +fig, ax = plt.subplots(figsize=(9, 4)) +colors_dpi = ["steelblue", "darkorange", "green"] +for (name, vals), c in zip(dpi_results.items(), colors_dpi): + arr = np.array(vals) 
+ ax.plot(alpha_grid, arr / arr[0], label=name, color=c, linewidth=2) + +ax.set_xlabel(r"garbling intensity $\alpha$ (0 = identity, 1 = fully mixed)", + fontsize=11) +ax.set_ylabel("divergence normalised by its value at $\\alpha = 0$", fontsize=11) +ax.legend(fontsize=10) +ax.set_ylim(-0.05, 1.1) +plt.tight_layout() +plt.show() + +print("Divergences at α = 0 (no garbling):") +for name, vals in dpi_results.items(): + print(f" {name.ljust(50)}: {vals[0]:.4f}") +print("\nDivergences at α = 1 (complete garbling):") +for name, vals in dpi_results.items(): + print(f" {name.ljust(50)}: {vals[-1]:.2e}") +``` + +All three f-divergences decrease monotonically to zero as the experiment is progressively garbled toward complete mixing. + +This confirms the coarse-graining theorem: a single Blackwell garbling simultaneously contracts every f-divergence between the conditional distributions of signals given states. + +The following code makes the connection between the Blackwell ordering and the DPI explicit. + +It computes multiple f-divergences for experiments of increasing quality $\theta$ (the same parameterization used earlier) and verifies that Blackwell-higher experiments have strictly larger f-divergences. 
+
+```{code-cell} ipython3
+---
+mystnb:
+  figure:
+    caption: More informative experiments have larger f-divergences between rows
+    name: fig-blackwell-dpi-quality
+---
+θ_vals = np.linspace(0, 1, 100)
+dpi_quality = {
+    r"KL divergence $D_\mathrm{KL}$": [],
+    r"Squared Hellinger $H^2$": [],
+    r"Total variation TV": [],
+}
+
+for θ in θ_vals:
+    μ_θ = make_experiment(θ, N=2)  # 2-signal experiment defined earlier in the lecture
+    r1, r2 = μ_θ[0], μ_θ[1]  # conditional signal distributions, one per state
+    dpi_quality[r"KL divergence $D_\mathrm{KL}$"].append(
+        kl_divergence_rows(r1, r2))
+    dpi_quality[r"Squared Hellinger $H^2$"].append(
+        squared_hellinger_rows(r1, r2))
+    dpi_quality[r"Total variation TV"].append(
+        total_variation_rows(r1, r2))
+
+fig, ax = plt.subplots(figsize=(9, 4))
+for (name, vals), c in zip(dpi_quality.items(), colors_dpi):
+    arr = np.array(vals)
+    ax.plot(θ_vals, arr / (arr[-1] + 1e-15), label=name, color=c, linewidth=2)
+
+ax.set_xlabel(r"experiment quality $\theta$ (0 = uninformative, 1 = perfect)",
+              fontsize=11)
+ax.set_ylabel("divergence normalised by value at $\\theta = 1$", fontsize=11)
+ax.legend(fontsize=10)
+plt.tight_layout()
+plt.show()
+```
+
+Every f-divergence between the rows $(\mu_1, \mu_2)$ is strictly increasing in experiment quality $\theta$.
+
+At $\theta = 0$ the rows are both $[0.5, 0.5]$, so every divergence is zero.
+
+At $\theta = 1$ the rows are $[1, 0]$ and $[0, 1]$, so the KL-divergence is infinite and the Hellinger distance and total variation reach their maximum values.
+
+### Summary of the DPI–Blackwell correspondence
+
+The table below collects the precise correspondence between Blackwell's framework and the data-processing and coarse-graining literature.
+ +| Blackwell / DeGroot | Data processing / coarse-graining | +|:---|:---| +| Garbling $\nu = \mu Q$ | Applying Markov kernel $\kappa$ to a pair $(P, Q) = (\mu_1, \mu_2)$ | +| $\mu \geq \nu$ in Blackwell order | $D_f(\mu_1 \| \mu_2) \geq D_f(\nu_1 \| \nu_2)$ for every f-divergence | +| Sufficiency ($Q$ discards nothing) | Equality in DPI for every strictly convex $f$ | +| DeGroot value $I(\mu; U_H)$ | Mutual information $I(\tilde{x}_\mu;\, \tilde{s})$ (Shannon DPI) | +| Posterior spreads under $\mu$ vs $\nu$ | $D_f$ between rows larger under $\mu$ | +| Blackwell theorem (economic $\Leftrightarrow$ garbling) | DPI for all $f$ $\Leftrightarrow$ single Markov kernel witnesses dominance | +| Chentsov's uniqueness theorem | Fisher information is the unique coarse-graining-contracting metric | +| Information bottleneck $I(T;Y) \leq I(X;Y)$ | DPI for mutual information applied to Markov chain $Y{-}X{-}T$ | + + ## Relation to Bayesian likelihood-ratio learning The lecture {doc}`likelihood_bayes` studies Bayesian learning in a setting that is a special, dynamic instance of everything developed here. 
diff --git a/lectures/enet_binomial.csv b/lectures/enet_binomial.csv new file mode 100644 index 000000000..d943fae18 --- /dev/null +++ b/lectures/enet_binomial.csv @@ -0,0 +1,200 @@ +1.00,0.46,-0.13,1.29,-0.47,-0.17 +0.00,0.96,0.40,0.98,0.02,-1.02 +0.00,0.23,0.84,-1.13,-0.81,0.36 +0.00,0.21,-0.76,-0.77,1.75,0.08 +1.00,-0.72,0.40,-0.91,0.14,-2.37 +0.00,0.89,-1.77,-0.30,-0.37,-0.55 +0.00,-0.97,-2.21,2.48,-0.47,-0.45 +0.00,1.08,-0.49,0.68,-1.17,1.13 +1.00,1.38,0.50,-0.85,-1.41,0.68 +1.00,-2.10,-0.47,-0.25,0.67,-0.31 +1.00,0.90,-0.55,0.54,2.67,0.64 +1.00,-1.07,0.41,-0.15,-0.21,1.29 +1.00,0.19,0.07,1.37,0.87,-0.73 +0.00,0.88,-0.24,0.34,-1.00,-1.67 +0.00,-0.62,-1.15,0.43,-0.82,-0.30 +1.00,-0.53,-1.55,-0.18,1.81,-0.62 +0.00,1.06,-0.29,0.32,1.30,-0.48 +0.00,0.41,1.04,0.38,0.55,0.59 +0.00,1.05,-0.54,-0.93,0.74,1.39 +1.00,-1.02,-1.56,1.26,2.80,1.13 +1.00,-1.44,1.14,2.23,2.70,-0.05 +0.00,0.48,-1.30,-0.23,0.15,-0.64 +0.00,0.03,1.76,0.27,1.83,1.00 +1.00,-1.32,0.98,0.64,0.02,0.97 +0.00,0.06,-1.63,-0.34,-1.25,-0.19 +0.00,0.16,0.35,-0.14,-0.30,1.44 +0.00,-1.63,0.80,0.55,-1.19,0.57 +1.00,1.64,0.64,1.46,-0.38,-1.41 +0.00,0.32,-1.84,-1.41,1.86,-0.48 +1.00,-0.43,0.47,0.38,-1.37,0.96 +1.00,-0.39,-0.28,0.04,0.16,-1.92 +0.00,-0.59,-0.21,0.66,-0.22,1.02 +0.00,0.47,0.41,-0.51,0.80,0.30 +0.00,-0.08,-1.02,-0.82,0.02,0.04 +0.00,0.03,-0.86,2.13,-0.62,-0.73 +0.00,-0.23,0.61,0.64,0.20,0.57 +1.00,1.33,0.18,-1.67,0.85,2.17 +1.00,1.13,2.11,0.29,-0.77,0.45 +0.00,-0.73,-0.59,-1.04,0.49,0.58 +0.00,-0.01,0.31,-0.03,0.09,0.11 +0.00,-0.74,0.20,-0.11,1.15,-1.63 +0.00,-1.00,-0.95,-0.68,0.08,1.42 +1.00,0.45,0.74,1.22,0.28,-0.34 +1.00,-0.39,-0.46,-0.12,-1.05,0.22 +0.00,-1.84,-0.30,-1.41,0.91,-1.30 +0.00,-0.15,-0.42,-1.13,-0.96,-0.95 +1.00,0.14,-1.54,0.31,-0.04,1.79 +0.00,-0.45,-1.58,0.90,-1.01,-0.75 +1.00,-0.10,0.09,-1.28,0.68,0.81 +0.00,-0.82,-0.61,1.07,-0.28,0.64 +0.00,-0.20,-0.46,0.72,-1.13,2.20 +1.00,-0.82,-0.77,-0.95,-1.42,0.69 +0.00,-1.73,-0.14,0.06,0.34,-2.60 +0.00,-0.23,-0.74,0.25,0.45,0.55 
+1.00,-0.35,1.14,-0.49,1.18,0.09 +0.00,1.77,-0.37,0.99,-1.19,-0.16 +1.00,-0.09,2.05,-1.34,1.45,-0.08 +0.00,-1.55,-0.17,-0.38,-0.43,-1.25 +0.00,-0.22,-0.32,-0.03,0.51,0.02 +0.00,-1.33,-0.89,-0.01,0.01,0.76 +1.00,3.01,-0.69,0.05,-0.70,2.67 +1.00,0.32,2.45,1.08,-1.05,-0.66 +0.00,-0.23,-1.99,0.13,0.55,1.62 +0.00,-1.49,0.03,0.78,-1.80,0.85 +0.00,-0.36,-2.44,-0.23,1.04,1.70 +0.00,1.57,-0.80,0.17,0.08,-0.41 +1.00,-0.15,0.57,1.40,-1.18,-0.32 +1.00,1.15,-1.19,-0.32,-0.58,-0.27 +0.00,-0.01,0.35,-0.48,-0.69,-1.49 +0.00,0.25,1.09,0.08,-1.04,-1.35 +0.00,1.27,1.49,-0.26,-0.31,-1.74 +1.00,-1.08,0.22,-1.60,-0.63,-1.29 +1.00,0.79,-0.23,-0.30,0.14,0.47 +1.00,-1.87,1.21,-0.93,-0.14,1.13 +0.00,-0.20,-1.18,-0.85,0.34,0.36 +1.00,-0.07,0.11,-0.10,-0.13,-1.61 +0.00,-0.54,-0.89,0.91,0.62,1.57 +0.00,1.74,-0.54,1.10,-0.81,-0.93 +1.00,0.82,-0.22,-1.17,-0.70,-0.64 +1.00,0.70,0.33,0.25,-0.79,1.92 +0.00,0.44,1.09,0.15,0.33,-0.36 +1.00,0.05,-0.62,0.02,-0.19,-0.78 +1.00,-0.36,0.87,-0.13,0.73,-0.74 +1.00,0.27,0.02,-0.51,0.76,-0.15 +1.00,1.09,-0.47,-0.37,-0.47,-0.77 +0.00,-0.92,0.22,0.12,-1.88,0.12 +1.00,1.60,-1.32,0.27,-1.19,-0.57 +0.00,-0.03,0.00,-1.01,0.91,1.00 +1.00,0.01,0.81,0.03,0.51,-2.15 +0.00,1.40,-0.13,-0.46,-0.62,-0.78 +1.00,1.21,-0.89,0.72,0.24,0.96 +1.00,0.43,0.26,1.14,0.67,-0.17 +0.00,-0.94,0.83,-1.01,-0.22,-0.20 +1.00,0.30,0.88,-1.13,1.31,1.41 +0.00,-0.15,-1.34,1.41,0.36,0.14 +0.00,0.79,-1.00,0.33,-1.23,-0.72 +1.00,-0.44,-1.05,1.30,-1.39,0.27 +0.00,-0.56,0.62,-0.63,-0.40,-1.83 +1.00,1.73,-0.04,-0.99,-0.87,-0.26 +1.00,-2.85,-0.19,1.13,-0.87,0.66 +0.00,1.46,-0.76,-1.04,1.75,0.83 +1.00,-0.16,-0.98,0.12,0.71,0.78 +0.00,1.52,-1.04,0.23,-1.45,0.26 +0.00,0.19,-0.38,-1.34,-0.01,3.35 +0.00,-1.08,-1.65,-0.45,-0.67,-0.46 +0.00,0.33,-0.79,0.15,1.00,-0.53 +1.00,-0.40,1.48,-0.60,-0.33,2.67 +0.00,-0.22,-0.60,1.22,-1.88,0.34 +1.00,1.30,-1.54,0.41,-0.31,-0.85 +1.00,1.81,0.91,-0.85,-1.61,-2.00 +1.00,-1.64,-0.58,1.46,0.34,0.93 +1.00,1.82,0.47,-1.62,1.76,1.62 +1.00,-0.09,-1.69,1.76,-0.77,-0.38 
+0.00,0.38,-1.07,-0.67,1.24,-0.78 +0.00,0.55,-0.52,-0.86,0.87,-1.39 +1.00,-0.78,-0.54,0.26,-0.28,1.86 +1.00,-0.35,-0.13,0.52,2.25,1.77 +0.00,-2.27,-1.99,1.03,1.37,0.20 +0.00,-0.72,0.30,0.54,0.03,0.02 +1.00,2.49,-0.48,-1.96,-1.33,-0.83 +1.00,-0.56,-0.41,1.20,-0.61,-0.44 +0.00,1.09,-0.98,0.56,0.47,0.91 +0.00,-0.09,-1.04,-0.91,0.49,-0.38 +0.00,-0.47,1.19,-0.41,-0.94,0.16 +1.00,-0.77,-0.70,0.56,-1.31,0.84 +1.00,1.26,1.24,1.00,0.39,0.99 +1.00,-0.92,0.21,0.97,-1.27,-0.76 +0.00,0.10,0.37,0.01,0.01,-0.38 +1.00,-1.18,-0.80,0.09,0.78,-0.60 +1.00,0.68,-0.70,2.26,-1.15,0.50 +1.00,0.34,0.90,1.99,1.43,-0.49 +0.00,1.08,-0.75,1.00,-1.18,-1.04 +1.00,-0.20,0.87,1.41,0.12,0.04 +0.00,-0.81,-0.08,-0.34,-0.15,-0.66 +1.00,0.33,-0.28,0.46,-0.54,-2.59 +0.00,0.46,1.45,-0.40,1.16,-0.40 +0.00,1.46,1.08,-0.81,0.52,0.22 +1.00,-0.55,-0.08,-1.52,-0.98,1.20 +1.00,-1.39,-0.14,1.05,2.06,-1.49 +0.00,0.19,-1.07,-0.09,1.19,-0.21 +0.00,1.82,1.69,-0.35,0.61,0.05 +0.00,-0.42,-2.00,-0.18,-0.25,-0.84 +1.00,1.09,-0.17,1.30,1.19,0.37 +1.00,-0.01,0.60,-0.46,-1.40,2.05 +0.00,-1.77,-1.09,0.54,-0.40,0.07 +0.00,-0.97,0.30,1.06,-0.65,1.01 +0.00,0.13,-0.92,0.47,1.55,0.14 +1.00,0.66,-0.43,-1.46,0.14,-0.57 +0.00,0.77,2.47,-0.18,-0.63,-1.01 +0.00,1.29,1.07,-0.02,-0.33,0.09 +1.00,0.71,0.45,-0.19,0.76,-0.25 +0.00,0.68,0.27,0.29,0.52,0.30 +1.00,-0.05,-0.81,1.63,-1.04,1.09 +1.00,1.26,-0.09,0.02,1.18,0.27 +1.00,-0.11,1.90,1.12,1.27,0.02 +0.00,0.30,1.55,-1.99,-0.08,-0.58 +1.00,-1.11,1.76,0.78,0.17,-1.04 +1.00,-2.16,0.04,-0.38,-0.78,-0.13 +0.00,-0.77,1.66,0.94,0.98,-0.12 +0.00,-1.49,0.86,0.25,0.62,0.24 +1.00,-0.69,-0.81,-0.95,0.25,1.49 +0.00,-0.85,0.57,0.70,0.21,-0.73 +1.00,1.42,1.42,-1.16,1.46,0.37 +0.00,-0.48,-0.76,-0.10,1.20,0.65 +0.00,1.62,0.40,1.38,0.32,-0.61 +0.00,-0.08,0.74,-0.01,-0.46,0.79 +0.00,-1.33,1.05,-2.14,-1.26,0.45 +1.00,0.43,-0.49,-1.63,1.94,-0.41 +1.00,0.05,0.15,0.32,-0.22,-1.02 +0.00,1.11,-0.55,0.25,-0.70,-0.80 +1.00,1.67,-1.24,0.95,0.13,-0.16 +1.00,-1.51,-0.99,-0.77,2.36,1.66 
+1.00,-1.16,1.29,0.35,1.18,0.16 +0.00,0.31,-1.03,-0.23,-0.10,0.17 +1.00,0.45,0.13,-0.67,-1.20,1.07 +1.00,-0.26,-0.09,-0.46,-0.10,0.05 +1.00,-0.18,-0.39,-0.05,0.34,0.29 +0.00,-0.28,0.71,0.43,0.63,-1.07 +0.00,-0.93,1.54,0.53,1.57,0.31 +1.00,-0.20,1.57,0.67,-0.14,-1.16 +1.00,0.69,0.15,0.01,0.36,0.12 +1.00,1.66,-0.34,0.96,0.11,-0.31 +1.00,0.90,0.23,-1.77,1.44,0.23 +1.00,-0.29,-0.41,-0.65,1.51,-0.97 +1.00,-0.35,0.17,-0.45,-0.98,-0.87 +0.00,-0.81,-0.77,-0.74,0.16,0.18 +0.00,0.13,-2.27,0.26,0.39,-0.37 +1.00,-0.20,-1.17,-0.91,1.83,0.87 +1.00,-1.24,0.43,0.14,-0.66,0.02 +0.00,-0.97,-1.10,0.14,-2.43,-0.20 +0.00,-0.40,1.28,-0.07,1.76,0.16 +0.00,-0.59,-1.22,-0.11,-0.56,0.20 +0.00,1.41,0.32,-0.83,0.34,-1.51 +1.00,2.10,0.59,0.03,-1.88,-1.52 +1.00,0.21,0.84,1.79,0.55,0.20 +0.00,-0.16,-0.07,0.99,-1.83,-1.28 +1.00,1.70,0.90,-0.20,1.13,-0.67 +0.00,1.34,0.10,-0.44,0.21,0.97 +1.00,1.59,-0.49,1.58,0.06,1.47 +0.00,-1.27,-2.48,-0.23,-0.64,-1.84 diff --git a/lectures/enet_poisson.csv b/lectures/enet_poisson.csv new file mode 100644 index 000000000..27ad0139b --- /dev/null +++ b/lectures/enet_poisson.csv @@ -0,0 +1,200 @@ +1.00,0.19,-1.29,0.07,-2.64,1.20 +0.00,0.78,0.02,-0.46,-0.29,-2.26 +0.00,-1.58,2.16,0.31,-0.18,-1.72 +0.00,-0.54,-2.32,-0.48,1.03,-0.09 +1.00,-0.51,1.89,-0.07,0.67,0.57 +1.00,1.05,-1.20,-0.28,1.48,-0.89 +0.00,-2.48,-0.87,-1.05,-0.68,-1.58 +1.00,-1.36,-0.53,-0.67,0.37,-1.10 +3.00,-1.34,1.21,1.65,1.29,-1.42 +2.00,-0.67,-0.49,2.41,-0.96,-0.83 +0.00,0.35,-1.14,-0.46,-0.08,-1.09 +1.00,0.94,-1.71,1.38,-0.67,0.27 +1.00,-0.16,-1.08,1.04,-1.58,0.45 +1.00,0.68,0.48,-0.83,-0.48,1.09 +0.00,-1.11,-0.10,-0.19,-0.37,-2.06 +0.00,-0.79,-0.48,-0.73,-1.69,0.69 +1.00,-1.07,-0.48,-0.77,1.61,-0.58 +0.00,1.76,0.42,-1.21,-0.03,0.32 +1.00,-1.39,0.48,0.27,0.15,-0.10 +0.00,0.53,0.55,0.77,-0.22,-0.29 +0.00,-0.89,0.94,0.59,-0.23,-1.17 +0.00,-0.29,0.04,0.07,0.32,-0.04 +1.00,-1.20,-0.48,0.17,-1.80,1.33 +2.00,-0.04,1.63,0.20,-0.89,-1.21 +2.00,-0.25,-1.22,0.31,0.61,1.03 +0.00,0.00,0.31,1.19,0.24,-1.37 
+1.00,-0.60,0.19,-1.55,0.08,-0.41 +0.00,-1.13,0.08,0.73,-0.81,-1.36 +1.00,-0.44,1.56,-0.55,-1.33,-1.27 +1.00,0.12,-0.44,1.64,0.54,-0.51 +0.00,-0.82,-0.40,-1.29,-1.15,-0.75 +3.00,-0.18,0.12,1.23,1.16,-0.38 +0.00,-0.44,-0.02,-1.71,-1.52,-1.01 +1.00,-0.81,-1.17,-0.53,1.00,-0.48 +1.00,-0.48,1.09,-0.73,0.53,0.78 +3.00,0.30,-0.41,1.63,0.14,0.88 +0.00,1.76,-0.50,0.81,-2.67,-1.31 +3.00,-1.82,2.02,1.00,0.02,1.96 +1.00,0.90,-1.35,-1.40,0.35,-2.39 +0.00,-0.39,-0.12,-0.39,0.37,-0.25 +1.00,1.93,-0.02,0.99,1.11,-1.11 +2.00,0.43,0.86,-0.65,2.61,-1.01 +1.00,-0.92,0.32,-0.28,-0.52,-0.40 +0.00,0.81,0.26,-0.32,0.05,-1.44 +0.00,2.15,0.08,-1.06,0.20,-1.01 +2.00,2.07,-0.59,0.70,-0.73,-0.21 +2.00,1.21,0.84,-0.63,0.12,0.26 +3.00,0.07,1.05,-0.71,0.21,-0.47 +1.00,0.75,-2.16,-1.62,-0.21,-2.59 +0.00,-0.49,0.51,1.60,-0.25,-0.78 +2.00,0.45,-0.18,-0.44,0.04,0.23 +0.00,0.84,-2.40,0.91,-1.12,-1.88 +0.00,0.08,-0.99,-2.44,1.03,-1.23 +2.00,-0.22,0.33,-1.57,0.78,0.73 +0.00,-0.34,1.34,0.92,-0.35,0.55 +2.00,0.62,0.91,-0.31,1.87,-0.04 +1.00,0.83,0.85,0.08,-0.16,0.68 +2.00,0.39,-0.76,0.62,0.10,0.06 +2.00,2.30,0.29,-0.73,0.14,-1.38 +0.00,-0.10,-0.06,-1.66,0.73,-0.14 +0.00,0.71,-0.06,-1.23,0.45,0.13 +0.00,-0.53,-0.23,-0.54,0.37,-0.54 +0.00,0.01,-0.32,-0.14,-1.93,0.83 +0.00,-1.78,0.90,-1.25,-1.34,-0.75 +1.00,-0.73,0.04,1.36,0.45,0.38 +0.00,-0.84,-0.36,-0.28,2.21,0.01 +0.00,-0.12,1.54,-1.89,-0.81,-0.45 +1.00,-1.36,1.31,-0.05,0.08,-0.75 +1.00,-1.26,-1.82,-0.05,-1.28,-0.37 +0.00,0.77,0.89,-0.11,1.17,1.10 +1.00,0.22,0.85,-0.43,-0.96,0.50 +0.00,0.51,0.26,-1.28,-0.85,2.98 +2.00,0.74,0.19,0.53,-1.21,-0.07 +2.00,1.41,-0.50,-0.17,2.14,-1.84 +0.00,-0.01,-0.34,0.35,-0.58,0.64 +1.00,-0.93,0.30,-0.59,0.34,-1.34 +0.00,-0.27,-1.10,0.41,0.12,1.11 +1.00,-0.19,1.03,1.03,1.09,-2.01 +0.00,0.09,-0.02,0.60,0.69,-1.07 +0.00,-0.48,-0.09,-0.39,0.19,-0.28 +0.00,0.03,-0.66,0.65,-1.85,-1.19 +3.00,-0.68,1.02,1.21,0.87,-0.02 +0.00,0.13,-1.03,-0.45,-0.17,-0.92 +0.00,-0.63,0.03,-1.05,0.83,0.70 +1.00,0.56,0.14,-0.31,2.02,0.50 
+0.00,-0.06,-0.44,1.05,-1.04,-1.00 +2.00,-2.93,1.73,-1.79,1.59,0.08 +1.00,0.17,0.42,-1.31,2.19,-1.60 +1.00,0.07,-0.16,-0.12,-0.44,-0.37 +0.00,0.28,0.60,0.02,-0.04,-0.86 +2.00,-1.56,-0.25,1.00,-0.13,0.59 +0.00,-1.22,0.24,-1.64,-0.75,-0.27 +1.00,2.10,-1.71,-0.63,-0.90,0.46 +2.00,-0.49,2.23,1.00,0.23,0.09 +2.00,-0.64,3.33,-0.40,0.51,0.16 +1.00,1.52,-0.17,0.30,-0.90,-0.83 +2.00,1.29,-1.89,0.41,0.12,0.56 +1.00,0.22,1.08,0.25,-0.85,-0.25 +0.00,0.31,-0.17,-0.20,0.40,-0.06 +1.00,0.29,-0.73,-2.33,0.58,0.42 +2.00,0.59,-0.27,0.07,-0.33,-0.57 +0.00,-1.59,0.93,0.42,-1.91,-0.48 +2.00,-0.15,0.34,-0.91,-0.91,1.83 +1.00,1.88,-0.09,-1.47,0.69,0.30 +0.00,1.56,-2.15,-0.13,-0.66,-0.21 +0.00,0.29,1.93,-0.31,-0.95,-1.23 +1.00,0.24,1.16,0.35,-0.22,1.00 +1.00,0.72,2.22,0.79,0.01,-0.24 +3.00,0.30,1.15,0.67,0.73,1.53 +0.00,-2.42,0.45,-0.63,-3.36,0.19 +1.00,0.39,0.53,-1.37,0.14,0.07 +1.00,-1.96,0.80,1.34,0.94,-0.46 +0.00,0.45,1.15,1.82,-0.82,-0.07 +1.00,0.01,1.80,-1.56,1.42,-0.58 +4.00,-0.42,0.95,-0.13,-0.54,1.29 +1.00,1.02,-1.19,-0.59,-0.39,0.13 +1.00,-0.28,0.13,0.17,0.54,-0.17 +3.00,0.33,0.19,0.58,-1.40,-0.64 +0.00,-1.80,2.59,-1.06,0.99,1.27 +0.00,-1.63,-1.26,-0.67,0.03,-1.31 +1.00,0.81,-0.03,-1.63,-0.10,-0.09 +0.00,0.37,-0.95,-1.88,0.44,-0.58 +1.00,-0.08,-2.13,-0.48,1.84,-0.21 +3.00,0.10,-0.62,1.05,-0.43,0.43 +1.00,0.08,-0.79,0.19,-1.91,-0.73 +2.00,-0.32,0.93,-1.42,0.25,0.42 +0.00,1.02,0.72,-0.97,0.26,-2.89 +1.00,-1.60,0.18,-0.32,0.60,-0.71 +0.00,-1.80,1.92,-0.32,-0.17,1.54 +1.00,-0.13,2.33,-0.10,-0.03,0.87 +1.00,-0.38,1.09,1.65,-0.48,-1.06 +2.00,-1.45,-0.19,1.51,-1.30,0.95 +1.00,0.93,-1.83,-0.75,1.29,-1.09 +1.00,-0.24,0.75,-0.94,-1.03,0.04 +1.00,0.15,-1.06,0.32,-0.43,-1.10 +1.00,0.76,-0.49,0.65,1.62,-1.81 +1.00,1.07,-0.26,0.96,-2.02,0.57 +2.00,-0.05,-0.24,1.08,0.61,0.10 +0.00,0.62,-0.80,1.15,1.08,0.41 +1.00,-1.75,-1.60,1.10,-1.41,-3.07 +5.00,0.39,0.04,-1.42,0.36,2.00 +0.00,-0.03,-0.53,-0.71,-0.32,1.14 +1.00,0.52,0.80,-0.97,-1.50,-0.53 +1.00,2.13,-2.04,0.25,0.59,-1.84 
+1.00,-0.10,-1.13,0.79,-0.38,0.65 +1.00,0.34,-0.06,-0.89,0.16,-1.86 +0.00,0.07,-1.35,-2.24,-0.21,-0.06 +0.00,1.94,-0.53,0.34,0.29,0.37 +1.00,-0.29,0.50,1.08,-1.00,0.48 +0.00,-0.45,1.04,-0.46,-0.78,-2.26 +1.00,0.61,0.24,2.04,-0.29,1.23 +0.00,1.61,0.77,-0.71,0.04,-1.81 +1.00,1.65,-0.05,0.11,-1.94,-0.91 +3.00,0.11,0.09,-0.74,0.74,1.81 +1.00,-1.17,-0.91,1.10,1.19,0.53 +3.00,1.46,0.37,2.49,-1.06,0.18 +3.00,1.69,0.89,0.09,0.35,-0.26 +0.00,0.85,-0.59,-1.19,-0.29,-0.39 +1.00,-1.43,0.01,-0.71,0.88,-0.98 +0.00,0.58,0.85,-0.68,-1.23,-0.61 +1.00,-0.35,-1.19,-0.11,-0.23,3.35 +0.00,-1.78,-0.35,0.44,-0.12,0.36 +0.00,-0.52,0.60,0.66,0.28,-0.67 +1.00,-0.63,1.34,-1.16,0.67,0.74 +3.00,-0.44,1.29,0.77,0.51,0.42 +0.00,-3.13,-0.32,-1.00,0.31,-0.85 +0.00,-1.13,0.30,0.93,-1.11,-1.46 +2.00,1.13,-0.61,-1.11,-0.31,1.20 +1.00,-1.71,0.69,-0.88,-0.73,-0.12 +1.00,-0.09,2.04,-0.77,-0.00,0.07 +0.00,-0.03,1.15,0.30,1.33,0.38 +0.00,-1.12,0.21,-1.95,0.07,0.22 +0.00,-0.69,-0.46,-0.90,-0.00,1.34 +0.00,-0.47,0.28,1.22,0.15,-1.31 +0.00,-0.18,-0.09,0.57,-0.37,-1.41 +2.00,-2.29,1.24,1.69,-0.06,1.71 +0.00,0.28,-1.66,1.06,-0.59,-1.19 +3.00,0.34,2.06,-0.25,1.11,0.73 +1.00,1.49,-0.12,2.39,0.82,-1.39 +4.00,0.97,-0.86,-1.07,0.82,-0.73 +0.00,-1.57,0.77,0.70,0.23,-1.38 +3.00,0.82,1.46,0.41,1.04,0.22 +2.00,0.96,-1.64,-1.18,1.48,-0.40 +0.00,0.66,0.34,0.87,-1.01,-0.12 +1.00,-0.27,-1.33,0.31,-0.30,0.43 +2.00,1.15,0.24,1.47,0.38,-0.66 +2.00,-1.06,1.30,-0.52,0.20,-0.38 +1.00,-1.98,0.65,-1.93,0.78,0.79 +2.00,0.33,0.05,1.02,1.01,-1.77 +3.00,1.61,1.40,2.53,-0.18,1.15 +1.00,0.52,-0.70,1.20,0.32,-1.74 +2.00,0.05,-0.87,0.69,-0.12,-0.11 +3.00,2.26,0.95,-0.42,0.50,1.57 +1.00,-1.87,-1.71,-0.20,0.64,1.12 +2.00,1.01,-0.68,1.59,0.92,2.70 +1.00,0.67,-0.13,-0.91,-0.13,-1.62 +6.00,0.82,1.68,0.93,1.09,1.76 +1.00,-0.93,-0.50,0.92,-0.40,-1.18 +1.00,-0.68,-0.24,-0.91,1.33,0.22 +2.00,2.13,0.88,-0.42,-0.88,1.70 diff --git a/lectures/gee_simulation_check.txt b/lectures/gee_simulation_check.txt new file mode 100644 index 000000000..e69de29bb 
diff --git a/lectures/lasso_data.csv b/lectures/lasso_data.csv new file mode 100644 index 000000000..12e133f53 --- /dev/null +++ b/lectures/lasso_data.csv @@ -0,0 +1,300 @@ +-178,3,59,-132,-32,-126 +303,17,-241,12,-81,41 +-54,159,2,5,-68,-150 +395,122,5,34,135,215 +-52,101,29,17,100,60 +255,-14,-146,-60,46,29 +-21,192,-131,51,-181,-111 +278,49,-69,69,-51,93 +-38,25,-48,-115,-5,30 +-191,101,75,-109,-48,82 +-373,-179,15,-76,56,-86 +123,-68,-78,-115,-26,-85 +-32,-102,84,42,45,41 +-78,-32,48,74,-57,34 +225,4,-115,-107,125,130 +-162,29,22,24,-31,144 +32,88,-32,67,-65,-93 +28,-50,-80,-43,234,-107 +35,-244,132,-31,47,154 +1,62,61,92,-58,13 +-427,-176,67,-152,-82,-94 +-54,-52,3,-54,-57,16 +185,-46,-173,95,114,-50 +-111,-49,19,25,-46,-129 +-212,-131,-91,-37,-87,-93 +352,-12,-15,106,-14,146 +76,32,-78,65,115,60 +34,28,-35,-22,73,11 +243,58,-189,-101,112,230 +244,-37,-241,-9,166,16 +10,122,-112,-31,-136,-32 +170,-128,-165,106,18,-61 +38,54,-104,-53,32,17 +-143,-142,74,-119,-140,3 +68,-118,-114,68,109,43 +168,-104,-215,7,83,65 +-80,16,-115,-74,-44,14 +432,-103,-317,-34,-87,35 +-173,-50,-40,-74,211,-73 +-156,-102,39,-60,-8,-12 +-306,143,43,19,41,-122 +90,28,-6,-31,-20,62 +11,140,-101,73,-67,-63 +30,145,-39,75,93,-46 +36,-20,26,-76,-88,86 +-398,-194,234,9,48,-37 +137,-71,-53,-128,-41,111 +28,43,-192,-97,-249,-4 +-165,-40,114,63,-33,-5 +38,71,58,73,34,9 +174,-15,69,65,-88,15 +-12,8,122,153,126,-61 +50,-61,-74,-72,38,18 +-520,9,277,-157,42,-19 +-24,-79,97,-34,7,29 +42,-83,-64,83,74,-44 +-186,92,35,-123,29,-85 +-90,-124,-149,107,31,-228 +-228,-165,5,-189,134,5 +99,-23,-92,97,102,-44 +145,108,105,74,58,70 +16,54,48,157,4,-170 +-162,9,9,-45,39,-6 +-96,-87,82,-61,69,71 +51,23,-114,-163,172,112 +117,-129,-27,22,-40,-107 +-140,24,81,253,-26,-186 +236,98,82,154,-71,90 +-500,-16,118,-154,-104,-34 +-368,1,155,-217,339,-71 +-325,155,-126,-244,65,-70 +-96,-112,25,67,29,-171 +376,-87,-237,-22,6,40 +147,-46,103,21,151,42 +236,-107,-177,60,-131,9 +102,-87,-50,58,-99,-128 +408,-31,-119,163,96,82 
+73,19,-56,109,-13,-82 +99,32,24,62,-110,31 +-183,38,59,12,-82,4 +40,-37,135,99,-3,-4 +-224,213,54,-99,-133,104 +59,-111,-18,-111,-68,96 +-68,8,-63,-48,226,-206 +-86,-62,141,117,-24,-55 +-370,143,116,50,281,-211 +-333,-14,-45,-203,47,-74 +62,-29,-82,-82,-199,102 +-260,0,-72,-71,-76,-181 +-165,152,110,62,-6,-56 +-101,-13,40,17,123,-65 +193,234,-26,0,-149,60 +112,-74,-59,146,116,25 +36,26,111,92,-117,62 +-106,-33,85,61,83,-200 +10,-162,-23,-34,89,30 +92,162,-158,-24,46,-58 +-200,117,55,-109,-16,-7 +154,-94,-110,38,-74,-62 +-115,150,-48,-117,-77,-126 +124,32,-94,13,11,77 +97,-118,-30,36,-44,87 +-42,-31,14,-64,-323,84 +133,12,54,113,-154,47 +270,-87,-155,97,241,143 +18,-134,-41,-64,-20,-92 +-149,-98,13,-39,78,35 +66,-73,-194,10,-167,-190 +268,85,-171,-33,1,114 +153,6,13,155,-138,40 +-163,31,-52,-49,26,18 +-232,-53,112,11,-131,-146 +-87,151,4,91,38,-52 +38,2,-101,-7,29,35 +1,235,131,64,50,42 +9,64,123,195,-78,-10 +242,-48,-63,-10,189,9 +-234,25,38,29,89,-66 +380,159,-167,218,130,-125 +-369,-169,215,77,-13,-205 +220,92,-109,-9,-351,126 +109,-113,-28,-35,-40,76 +-10,46,71,11,-81,-68 +191,-85,-24,96,66,64 +6,-2,72,51,-18,95 +158,153,14,233,88,23 +-113,28,1,-94,254,-58 +97,67,13,16,-66,121 +11,55,60,99,117,-87 +37,169,-47,-44,-14,30 +36,23,-141,-180,-110,11 +-380,138,149,10,-117,-160 +-93,-115,-17,102,-91,-59 +-160,108,101,-45,-36,8 +-93,-109,-1,27,17,-72 +32,60,44,-42,100,-57 +-266,110,173,-44,-81,-28 +-220,-43,47,-0,-59,-21 +-52,51,-4,36,43,47 +104,-75,27,3,24,67 +-184,-125,-44,-48,43,-91 +504,-159,-118,30,8,233 +-320,-12,-35,-106,146,-145 +55,-63,-176,-59,43,-59 +-47,-134,91,70,117,46 +104,-28,39,14,11,102 +110,-255,52,35,-91,49 +-333,-179,45,55,-29,-161 +-304,-64,28,-87,122,-161 +-15,30,-35,84,188,-170 +-122,42,-29,38,160,-99 +295,-100,-51,100,-75,-10 +-281,-65,-45,11,109,-45 +-15,82,56,97,90,103 +80,-32,113,163,3,92 +91,37,-58,4,-18,26 +192,-73,-36,-2,-90,130 +84,-186,50,-3,-42,-6 +28,-109,-151,84,-228,-90 +-281,101,57,-88,-176,3 +118,-103,154,321,-16,-8 
+-314,29,135,-135,17,-16 +11,100,187,14,17,123 +-3,-108,54,14,146,42 +365,-7,-84,187,-174,-6 +374,-113,-188,21,61,33 +-67,18,-30,-29,7,12 +69,-181,-51,-21,11,25 +62,-95,-135,125,-99,-133 +448,209,-60,58,-69,100 +203,25,-136,64,-20,-103 +-346,-152,153,33,7,-80 +-3,-17,3,-47,-43,-44 +-32,-156,-3,72,-133,27 +-11,57,23,-14,43,98 +281,31,-50,-13,-89,186 +-233,-180,55,-82,61,-98 +225,-79,16,243,8,37 +25,-80,28,133,-57,-228 +86,20,-37,-38,-76,28 +-184,-113,33,-62,54,21 +-39,-31,-42,61,-27,-80 +243,-46,-159,-37,181,146 +143,-5,9,11,-133,111 +-131,-36,-8,-26,-106,-108 +252,-53,-55,121,-72,13 +20,135,-14,-187,-54,12 +-428,-63,-75,-183,-45,-144 +266,-105,-2,30,37,125 +-346,169,156,101,-130,-355 +233,-185,-88,122,67,18 +331,-81,-20,158,-102,-134 +93,54,23,89,115,-31 +-229,-62,65,-96,135,-81 +-115,-133,64,-79,89,107 +309,-120,-52,84,37,91 +-221,-24,70,-193,152,288 +-305,36,88,-52,10,-56 +-137,78,77,1,-32,6 +149,-68,-27,136,54,-40 +-13,-213,73,12,-7,116 +152,-102,96,75,45,-20 +38,220,-38,-46,-120,39 +138,67,-31,-41,55,36 +-331,49,125,-94,-105,-60 +-2,-97,-85,81,-11,-48 +130,-50,35,112,74,-15 +-87,62,76,-14,127,-87 +23,-121,-8,72,-45,-48 +43,-81,48,42,-53,74 +125,-76,-81,58,19,62 +23,14,137,86,-186,131 +67,35,-87,-39,16,50 +-274,61,137,71,17,-146 +-126,-76,88,16,-33,-69 +-284,-48,32,18,73,-89 +-189,34,201,-116,-17,38 +280,47,-21,255,10,59 +-142,-98,21,-30,-108,-37 +-229,-14,153,26,-47,-108 +8,-110,-40,-30,-54,-8 +-34,-95,-79,30,134,-51 +-163,-128,67,25,-17,-132 +186,-100,-25,60,-29,206 +129,-33,-128,97,-100,-39 +-63,-68,30,6,0,134 +-305,-85,58,-136,162,134 +-117,-77,-77,-38,127,-53 +257,31,25,166,-55,30 +23,-5,71,21,28,-27 +-83,-117,-65,-4,-238,-50 +-11,116,157,60,-69,-34 +-70,77,133,-69,-91,128 +122,145,28,-88,20,193 +-241,-72,-25,14,-50,-57 +90,156,63,-7,135,84 +200,63,-63,6,-46,123 +-337,131,185,-8,44,-91 +380,-37,-47,205,-59,37 +77,13,90,176,137,32 +23,-38,-36,13,242,34 +-65,97,82,-48,76,134 +7,-18,143,48,-17,79 +-263,11,228,-193,90,37 +-86,-84,55,-65,51,-129 
+-295,79,78,-119,51,-85 +-139,-75,121,-91,58,43 +29,226,112,154,-142,-136 +-32,175,-55,-115,91,-85 +-130,-50,158,64,-65,59 +44,-28,-45,-129,-221,20 +-94,130,117,13,-16,201 +93,148,53,-52,84,197 +74,-26,-18,-20,16,64 +-91,-56,-69,-144,81,27 +-261,-122,21,-95,-29,-207 +333,61,-38,-51,113,234 +33,108,42,14,77,188 +306,12,-134,4,-20,98 +-60,-81,131,77,86,-7 +-136,97,35,23,-73,-55 +127,129,-107,21,170,-54 +-236,-10,82,23,-98,-76 +-185,-144,37,-56,-230,-129 +132,-98,-33,42,-12,30 +-197,19,-49,-205,37,1 +240,-207,-26,-16,-171,253 +55,3,23,-43,-67,188 +121,-48,20,82,-13,-39 +-59,203,112,-27,-26,125 +-32,68,-27,15,-112,-59 +138,15,34,202,-123,14 +173,70,-80,27,79,-23 +-72,-93,109,21,-44,116 +-33,-165,119,-160,-197,54 +-286,109,-12,-135,-20,-99 +168,-12,-0,51,-14,119 +245,47,-168,111,-3,34 +-149,134,-41,-97,31,-103 +-66,32,-35,-284,-14,157 +-80,26,124,31,107,-45 +-449,-50,54,14,-67,-39 +213,84,-193,112,-77,-55 +-58,-37,-420,-54,168,-133 +84,4,-63,47,-75,-11 +56,-69,-96,-118,37,52 +228,-38,-25,54,135,69 +-119,-1,70,136,152,-98 +274,-42,-132,85,-117,138 +-73,-84,145,-81,22,156 +142,-86,-13,117,-52,135 +97,91,-75,33,165,-81 +484,-43,-107,196,-104,125 +-130,142,-13,-11,-109,-213 +-230,41,-1,-131,-113,-56 +157,-77,134,198,-218,132 +-268,-128,-127,-66,-65,-190 +159,-63,-45,118,139,-8 +21,-100,-128,-158,-61,-12 +48,41,-54,248,62,-185 diff --git a/lectures/y_arma_data.csv b/lectures/y_arma_data.csv new file mode 100644 index 000000000..475d23871 --- /dev/null +++ b/lectures/y_arma_data.csv @@ -0,0 +1,251 @@ +y_arma11,y_arma14,y_arma41,y_arma22,y_arma50,y_arma02,y_arma11c,y_arma14c,y_arma41c,y_arma22c,y_arma50c,y_arma02c +-2.047076594847129505e-01,1.394072226185005814e+00,-7.618372133529949242e-01,-4.064523617925941412e-01,-9.835047173983663127e-01,6.143504692328910499e-01,4.259432631872511976e+00,6.738236941732065333e+00,3.921172305736134511e+00,4.841410872820988587e+00,3.567921955380871424e+00,5.020264181479198839e+00 
+2.537649126243639586e-01,4.567375047627768581e-01,-1.169637832882710837e+00,-2.904655508294423649e-01,1.933152260847050830e-01,6.429869148939288959e-01,4.398428009976181663e+00,7.368314202772689114e+00,5.710735793018799633e+00,4.558666615338087702e+00,5.627433726992007834e+00,4.271942218168720551e+00 +-1.614848622683232593e-01,-1.272326703442733020e+00,-2.554150415613765990e+00,4.377894222244522737e-01,-3.224625683687642463e-01,-5.829343493568055479e-01,3.894220064383869140e+00,6.410706525591642446e+00,6.653104231835761162e+00,4.369696549146111941e+00,5.441687121317324127e+00,3.719432040526521988e+00 +-8.586475013185908001e-01,-9.458320913213407177e-01,-1.416810339850927303e+00,1.292133237669666368e+00,-2.434714935535151170e+00,3.225999093088858238e-01,4.254945463650283699e+00,6.774751472682821074e+00,5.813209651463687067e+00,4.848351385863497676e+00,3.693440719499076152e+00,5.799271295947738381e+00 +1.127289339992149531e+00,-6.883717809906886309e-02,-2.004237998790744379e-01,1.802924788488173835e+00,-1.990610824223469688e+00,-4.553547958839088139e-02,4.888509710140729325e+00,4.026927273300911381e+00,5.506526563168436361e+00,4.708763021171975538e+00,2.747097130265434117e+00,5.199862796144467225e+00 +2.926896038343052453e+00,-8.584562919036571316e-01,-1.274069813257600048e+00,-3.157042843910953245e-01,-2.664447937057306537e-01,5.833794675456231982e-01,6.113454539016629496e+00,2.075139727032464076e+00,6.999617863858643929e+00,4.004972492948537521e+00,4.783782184444534735e+00,5.428226532398704229e+00 +2.775771947041553389e+00,-1.796823860810985218e+00,-3.435812286850086217e+00,3.328747540357390733e-02,1.713166700014398525e-01,1.287328621636036230e+00,5.768913584533245320e+00,5.364144882553992133e+00,6.220457380072277331e+00,3.279857874780836013e+00,4.382208504444793817e+00,5.760983622424916462e+00 
+2.396092869971668371e+00,-1.374140342963602990e+00,-3.324410934220093150e+00,6.241378334643329362e-01,-1.008235022896845523e+00,-2.571874757040344406e-01,6.783227456396678789e+00,6.453556693243497122e+00,4.760031787665699454e+00,1.397159712575776425e+00,3.955895170740390210e+00,4.892141935490703197e+00 +2.664703373581160939e+00,-2.274295006054620760e+00,2.847437087200943573e-01,-1.677062469844210035e+00,1.071593491473754156e+00,5.943089576180543565e-02,8.764846998978599757e+00,3.134175247222279470e+00,4.618905799269997559e+00,3.832999293510881422e+00,4.169523377113049989e+00,4.820177423393641192e+00 +3.514120165236296778e+00,-1.695939107720390027e+00,5.749749943751869541e-01,-1.372251161366488947e+00,1.414936450756299369e+00,8.067203104089537558e-02,6.664487842459541511e+00,2.089710848527895237e+00,7.375027854593720456e+00,8.576140496669221847e+00,6.748639486536131216e+00,2.669498198821129265e+00 +4.079031639245426888e+00,-1.383046694106316643e+00,-2.344893313337678364e+00,-6.250943658791197066e-01,1.328692743772108553e+00,1.289696408214198620e+00,5.123872851709446508e+00,4.754158166466891267e+00,7.226677542929837550e+00,7.811349700243606975e+00,6.399961038027319304e+00,2.505802874791816404e+00 +2.115568895475858469e+00,-1.285696195785905438e+00,-5.410382313438887536e+00,-9.582233749130679712e-01,-4.514660067096110119e-03,6.906817862356203763e-01,4.624298391542023268e+00,4.532322449938698483e+00,5.895034864771226069e+00,4.201755661206154180e+00,3.841171273307598000e+00,3.901458429730415567e+00 +1.407990916849725771e+00,-4.315387699941466426e-01,-2.434441803202209265e+00,-2.143241193463119032e-02,1.991430048897706306e-01,-3.245252085642316153e-01,3.298440951207899374e+00,3.023850958378810461e+00,6.011888360633887984e+00,2.048413650493551952e+00,5.931427026305052586e+00,5.352527421085418347e+00 
+1.381153138273853553e+00,1.084288569595046825e+00,1.475497484267841219e+00,1.675123411059225242e+00,1.269439435330158350e+00,-1.045172753525086096e+00,3.103686242816811269e+00,4.080448938513448809e+00,6.668471493814188022e+00,3.319703108792884105e+00,7.769291575371815028e+00,3.920245059994244663e+00 +2.468901196498200612e+00,4.531380836694007819e-01,9.757983087292942947e-02,1.159459620823442894e+00,1.329155575330689176e+00,-1.830503306089610316e+00,3.658159191813463718e+00,6.346035248965537612e+00,7.077166516280438913e+00,4.554699042844648105e+00,5.492605970846351937e+00,4.318612795940402371e+00 +3.211626130273146806e+00,-2.190557335162672814e+00,-4.025770289739019070e+00,2.374106873340321999e-01,-1.204791600774974603e+00,-3.953349523769545404e-01,3.372537397371419665e+00,4.291890175742812730e+00,4.582735795002996326e+00,8.003629851130945383e+00,3.299960257085285065e+00,5.863216575797151719e+00 +7.173325572515190096e-01,-1.440903136015097230e+00,-4.563025185706399256e+00,-7.098001117370973390e-01,-4.208258195771115151e-01,2.594451641987153057e+00,4.269616605460259784e+00,1.966875208797961960e+00,3.226094930911747038e+00,6.020287567355888214e+00,3.980251475991546606e+00,6.606646644611915420e+00 +-5.344161775827541705e-01,6.575126326774392016e-01,-5.766210270892349055e-01,2.080044842860721221e+00,2.597569170595709132e+00,1.316246656649133762e-01,3.840933541237355975e+00,5.846674991729185145e+00,4.098018048564538773e+00,1.600830935090639340e+00,4.628502633768053265e+00,3.585462029244003368e+00 +1.138068288338715872e+00,5.410281705276032138e-02,2.214383099182643644e+00,1.981718727958543980e+00,1.324496153816917765e+00,-1.821106292140853622e+00,2.401856988014751781e+00,6.656277522679868497e+00,4.283437503067081487e+00,1.384202228703339088e+00,5.999512681027482586e+00,1.528291132932185992e+00 
+9.991403387521696766e-01,-3.811617114262618733e-01,1.206231109078025332e-01,-2.586171993187720286e+00,-6.952960055817554164e-01,8.046298768301501925e-01,2.150521954723291884e+00,3.733293634828790175e+00,4.514158688834689848e+00,2.548048973349378521e+00,4.355070413414093622e+00,4.427199029031395838e+00 +5.611440100001430231e-02,5.262380750425947884e-01,-4.953603576685280174e+00,-4.320037329811370341e+00,-1.907598969450934878e-01,1.078018569695111006e+00,2.658257906111644253e+00,4.382200981413503449e+00,5.859367423091468297e+00,4.029777956216626933e+00,5.067531354897734985e+00,4.945705073361552628e+00 +3.301613052297283080e-01,1.746862891830231190e-01,-8.118789816136265713e+00,-3.003177430316123608e+00,7.712387043869173286e-01,-8.794142886128657466e-01,4.258293663630688286e+00,6.153065962604465255e+00,7.355119360103516968e+00,8.075010494827537855e+00,4.923466375553967822e+00,4.909788839978878272e+00 +3.663509651997356009e+00,-2.874794400657883831e+00,-3.617695985036166295e+00,3.937863948442860362e+00,5.309231030866371492e-01,-1.234990868812363196e+00,4.339908625421151633e+00,5.340219971922238074e+00,5.515921408322084218e+00,7.798414613260440831e+00,6.873233673651917286e+00,5.826629709744591601e+00 +2.863535086443184330e+00,-2.579467235640812373e+00,9.915726649245656388e-01,7.298812062776931064e+00,3.223797297234991532e-01,2.727270606602266056e+00,4.237569677073842023e+00,3.860558543165812484e+00,4.509396332971866705e+00,4.004854402537130653e+00,5.452430701800734525e+00,4.871357265495228184e+00 +1.213134378267162194e+00,2.413579806889863677e-01,1.303675711696500095e+00,2.444639467618931139e+00,8.915073927774813667e-01,5.015301818356774355e-01,4.572111121969630432e+00,4.512712257921455361e+00,5.355957290748269983e+00,4.794247150280836145e+00,3.660108072753280872e+00,2.429556238521846101e+00 
+8.319915033095108825e-01,-5.188516244935168498e-01,-5.687570288109577099e+00,-3.038003124026212198e+00,-8.719733907399778161e-01,-1.464298441207599888e+00,4.048062396070275071e+00,5.403303064625879060e+00,6.407665900443154428e+00,3.134788283354405625e+00,4.228899286686077730e+00,5.406063202984769944e+00 +9.700496358803372132e-01,-2.724024236513598485e+00,-7.053649446370279463e+00,-3.612813816796582422e+00,2.879464430993161628e-01,3.946224160478077891e-01,4.524770878655016482e+00,3.790844656041213234e+00,7.195887404329030623e+00,2.361511342912992362e+00,3.900621344854551253e+00,6.058011910665634048e+00 +1.357224041730048292e+00,-3.094356072793469803e+00,-1.008363536011465111e+00,1.046340672032861185e+00,2.368592569932105540e+00,-3.138197009761437006e-01,4.525455283146967211e+00,3.669994645124985411e+00,6.551577792201502604e+00,7.779296198399536344e+00,3.376143193000717524e+00,3.340663942774611428e+00 +1.202178532925514798e+00,-1.850307710282078411e+00,3.886862348001593581e+00,1.589445207794040638e+00,1.217857516123329376e+00,-9.122994252851901464e-01,4.653375618486451870e+00,5.177928739455077256e+00,5.315906173013113190e+00,6.297915587978544671e+00,1.981820108745978359e+00,5.388309049864337119e+00 +2.245772790529634744e+00,-1.516478417599159689e+00,-3.673322539992259772e-01,-5.204825623456811323e-01,-1.025887496633735574e-01,2.428428862906994556e+00,4.180686233113284800e+00,3.392074251314660671e+00,5.699029251570543408e+00,5.296396093445561171e-01,4.542328457752607740e+00,6.241995262291350599e+00 +1.441119035565789019e+00,-1.826409292124326900e+00,-5.940822212463316099e+00,9.652051478386654004e-01,4.052444759094278592e-01,1.610732939294483357e+00,7.038160704560359804e+00,3.766346935888272185e+00,5.999371763645894973e+00,1.156864407980374487e+00,5.145335831238891267e+00,4.119984700183874970e+00 
+-5.465696330630986921e-05,-1.719338771172416624e+00,-6.508425086827772432e+00,2.181828702209119619e+00,1.108177098201627686e+00,-1.781879468348538165e+00,4.807427395222168443e+00,4.255280106544435448e+00,7.009375617216063503e+00,5.319221720194350134e+00,4.137737097068861836e+00,3.233320057309073015e+00 +-2.661176385278094525e+00,-1.395987375772736572e+00,-1.270103392180482293e+00,1.110821262560730549e-01,-1.029079612725555704e+00,-1.728302907832516833e-01,3.457531085129489146e+00,7.444270232949445898e-02,4.591567366097629943e+00,5.417009442651861129e+00,3.131299035156449584e+00,3.515272342145693507e+00 +-4.686224156394166229e+00,-1.850331606786910887e+00,2.184759340112722548e+00,-2.041081623865537598e+00,-1.542782696707952095e+00,4.958978337833580552e-01,3.116982008029290707e+00,1.386033864121762438e+00,3.652533172280717277e+00,4.691688663117650648e+00,4.512583138494862922e+00,4.507587864330768745e+00 +-5.026691791720129920e+00,-9.200819435921131495e-01,-3.295321237037073248e-01,9.793109676417566245e-01,7.543043591655398394e-01,-1.011195992545088629e-01,3.457098235710537537e+00,6.072136861732305491e+00,3.598198391522849704e+00,4.228970068897724310e+00,4.217642302531045218e+00,5.607026952466911851e+00 +-3.511138640213175677e+00,-9.744145346579490274e-01,-5.043254897882221144e+00,6.789267989097658162e-01,3.161919450570618739e+00,-1.171798760336695056e+00,5.461214551316098564e+00,5.287352572723380106e+00,4.399439539341724270e+00,3.500205802814739364e+00,5.656071780409133609e+00,4.741463207851571049e+00 +-3.703237619293762606e+00,-4.488315090217093406e+00,-4.263381366707646336e+00,-1.872655445832931420e+00,1.413418391066827429e-01,-5.997420261799208951e-01,6.633267216554529888e+00,2.943362263037925697e+00,4.342340090444274381e+00,4.751667085587318873e+00,4.529073913784968042e+00,3.139337629007510788e+00 
+-3.100678161902563179e+00,-2.501030560910059375e+00,3.785869496888828040e-02,-1.776633578233102195e+00,-6.457530033435663341e-01,3.069899785414676785e+00,5.720232385386712970e+00,5.392964007325415032e+00,4.158870978293465193e+00,5.953350079983978915e+00,3.232746935335669036e+00,5.658934085315204143e+00 +-3.347081575849270241e+00,-1.875566408155557863e-01,1.373805813135958065e+00,-1.175124539027547321e+00,-3.084582261338173481e-01,2.304849343261525796e+00,5.650071425020970040e+00,4.727432440567961081e+00,1.757391563622786812e+00,5.363571028525146112e+00,2.433652791019824235e+00,5.946684745186741949e+00 +-2.549657823098020692e+00,-1.580664376558950313e+00,-2.049692467577199473e+00,1.306927984545027899e+00,4.255303096775275296e-01,-2.131719678615784375e-01,6.492867362025509514e+00,1.882685262636422330e+00,4.711271395093374892e-01,5.254387646473789530e+00,4.361070860586611531e+00,6.288490364663671173e+00 +-4.155153224244512167e+00,-1.638103590486665206e+00,-6.264898634428782920e+00,1.951636392749369175e+00,-1.053489723650152143e+00,3.729657763428335659e-01,6.853313246252220381e+00,3.201130691610447609e+00,2.033728470151814793e+00,4.500078973468650823e+00,4.296945244227881489e+00,2.029974511760805456e+00 +-4.141704456103339815e+00,-1.009012869991860128e+00,-5.401447994645364403e+00,-2.514643434433179170e-01,-2.706026508554485677e+00,3.268482030258669235e-01,6.328235046023614530e+00,4.065915530392780752e+00,5.646001626406993879e+00,4.280884981122723865e+00,3.923160234900610277e+00,-2.746902120133274394e-01 +-4.718113904319100271e+00,-1.802684091603253602e+00,-1.506657146317793128e+00,-1.004083775292140324e+00,-2.609366810774318424e+00,7.097452710287838196e-01,5.863057920820553548e+00,3.192879991044643706e+00,4.833576665416503104e+00,4.142242653072422343e+00,3.255009115094919636e+00,4.959172727951663973e+00 
+-5.049019775254446785e+00,-1.381563077028357345e+00,-3.607815631934928047e-01,1.537058909742779811e+00,-2.307742759492474427e+00,-1.690775288074841898e+00,5.308433259777813440e+00,3.269680422232153827e+00,1.538660360555280437e+00,1.342948121733243294e+00,5.136919446565984870e+00,5.150108154036455943e+00 +-5.433552651702445502e+00,2.818885562711461595e-02,-3.279417244823288780e+00,1.545791434687723953e+00,-1.477440056825851222e+00,8.359621473279092996e-01,5.133107815890593173e+00,2.991393703367608570e+00,-1.508745328376193129e-03,2.571244256838248088e+00,4.478464671550294263e+00,3.667520676780307909e+00 +-4.246275329602492654e+00,-6.130663520806219902e-01,-5.845062440614009702e+00,-1.952098650681894920e+00,-1.746264638512076361e+00,4.958869789032507303e-01,5.315079959506445739e+00,3.972743925412632215e+00,2.793269678504993969e+00,5.584940290483441316e+00,3.892535432986085731e+00,4.697240467623889693e+00 +-2.706499974809537967e+00,3.924922185885181269e-01,-4.105156923265348645e+00,-2.044592367564284530e+00,-3.178980144414182973e+00,-1.493057516814380037e+00,5.021757939751064548e+00,6.339447365323519712e+00,4.945394441279042930e+00,5.264834653484440352e+00,3.886343321092996739e+00,4.424725523759203583e+00 +-2.651467077068968337e+00,2.492099512157600394e+00,-7.841257565010855135e-01,-1.377536061630622699e+00,-3.839461700572829983e+00,2.582115106873133392e+00,5.587764431662366249e+00,5.405066853022743700e+00,3.860237202046897576e+00,4.107108869533949047e+00,5.487652977036795221e+00,4.996723475156893635e+00 +-1.921174944783671457e+00,9.412006395738453657e-01,-2.625864606616614139e-01,-1.331777564711734918e+00,-3.478711161053920531e+00,3.905701109020744077e+00,5.133713232906468527e+00,4.494368763835435487e+00,1.307926951608490818e+00,3.830865832317300601e+00,5.178034455680792902e+00,4.871516117668107881e+00 
+2.481098347911347979e-02,4.798757746073647468e-01,-4.210669215598828785e+00,1.519657317589790058e+00,-2.204219357018443404e+00,6.409508337405409595e-01,5.855966104246856929e+00,4.718316150574986878e+00,-1.208460825345432532e+00,3.876182703520312245e+00,5.371607561800637143e+00,4.279769579332299401e+00 +5.608947015146614756e-01,3.307660883373833016e+00,-5.302269633643058810e+00,1.154513370240555892e+00,-2.508472975056526444e+00,-9.069255698958078593e-01,7.453500872994089477e+00,6.136685070718774782e+00,1.577779596919128835e-01,5.811348619317231012e+00,4.013359064508622787e+00,4.274173284761452862e+00 +6.918019772089871733e-01,1.466478696550987060e+00,-3.621146700997774914e+00,-1.135210265489079218e+00,-2.270093923835809324e+00,-2.154054312539678939e+00,8.548631459854339809e+00,8.580677927587142761e+00,3.573725713298052664e+00,7.419094220950942287e+00,2.303293360327956396e+00,2.738280109656968442e+00 +5.933258203294379252e-01,-1.131123560438463294e+00,-1.605861339065546733e+00,7.302998686819985874e-01,-2.052409032866207905e+00,8.576733138557379732e-01,8.266456399981672476e+00,6.358139262462048080e+00,2.805261999954582031e+00,6.240747420223196507e+00,3.704750869121015366e+00,4.521147653224271146e+00 +1.445654395659279334e+00,2.089564369019220602e-01,-6.108205565391249259e-01,1.528236749888444379e+00,-2.038589923301223017e+00,3.072549047348906104e+00,6.531145968474424635e+00,3.659707778937812517e+00,1.361490758831767067e-01,4.413772251719586315e+00,3.702561277299750042e+00,5.074465917376252300e+00 +2.763119467305571675e+00,1.169930847882693303e+00,-3.410135203575566987e+00,1.227364972010502875e+00,-3.034243912264597220e+00,-2.259853471281571657e+00,6.228711353324530187e+00,5.390345214270871210e+00,-7.765975881852327234e-02,3.032668654013583875e+00,3.620297372992901774e+00,3.283416564361951284e+00 
+1.617596157559101577e+00,9.274198691471613465e-01,-4.341813534124731255e+00,1.443200242866820027e+00,-1.215922849984176546e+00,-3.882836332642096711e+00,5.969829029753539906e+00,7.359973068273208519e+00,2.975172798878382707e+00,3.885288949269857905e+00,3.261339717609219591e+00,4.941919813889841251e+00 +-6.576508653258905657e-01,1.632997792991522701e+00,-4.102480222204741622e+00,-2.327795448610795681e+00,-1.266255394890709374e+00,-1.779812888348156319e+00,3.960817346595896460e+00,7.353020846842392544e+00,4.432314530601575875e+00,6.348169316565152265e+00,2.502149813883460361e+00,6.128471791598121499e+00 +-1.013240803735802720e+00,-8.014628681214885919e-01,3.914986840899608023e-01,-4.182871213923220921e+00,-2.436187298675244417e+00,9.875968433588133699e-02,4.923257732371997264e+00,5.474074476059730188e+00,2.808478061409179993e+00,6.312024025740812228e+00,2.064002107289165000e+00,4.387602522912947833e+00 +6.197151744087525671e-03,-3.149234092173039290e+00,-1.654102488856168396e-01,-3.412852847139776769e-01,-1.374484405133386211e+00,3.008339685703273148e+00,7.066411960496671973e+00,5.893878504480789537e+00,-6.355295163345058285e-01,4.776525929809087856e+00,1.379400472880342399e+00,2.425387915421347351e+00 +-3.904493631610488347e-01,-1.264212846324544781e+00,-2.225130675970304672e+00,3.537047195554293033e+00,-1.110739285306775681e+00,1.763013865152736503e+00,7.669229880295137747e+00,7.033887082352526576e+00,-9.855532089657677020e-01,2.673791490967353468e+00,2.979947600959134135e-01,4.784590574609719482e+00 +3.385595454556619144e-01,8.543168756601032809e-01,-3.998827467911055589e+00,2.792529087814373145e+00,-8.980532521212727382e-01,-3.434307464143248279e+00,6.673272479165506255e+00,3.658839214659448835e+00,2.051941196236152010e+00,1.813070366847800763e+00,1.733569923426339532e+00,6.495342894923719967e+00 
+5.457907858885177044e-01,1.406319903308074881e+00,-4.003999994455309519e-01,7.598421744986119108e-01,-2.194432074523071652e-01,-1.521827718638907589e-01,6.771372674723006746e+00,3.146117206284628320e+00,5.052401051425629674e+00,3.103934056600683000e+00,2.788192216146352820e+00,5.148535415200887755e+00 +4.558412795895108172e-01,1.104444297834388106e-01,3.593332594690472170e+00,-4.491949968717320330e-01,-1.249186860862924187e+00,4.848963876141733631e-01,7.417506132955815623e+00,5.706804425219697663e+00,1.215304517010837859e+00,4.780543049014910117e+00,1.268904798642988663e+00,4.344918043171323241e+00 +1.029599828608850798e+00,-3.071971053391178708e-01,3.348362969849801640e+00,-3.252650131375099285e+00,-7.798963774535210280e-01,7.979538575667203260e-01,4.762983994516790887e+00,5.907165620073715040e+00,-2.723269413874936262e+00,4.328361348759194094e+00,2.685950096418174837e-01,3.347897102570498884e+00 +1.859740361719270263e+00,7.446068548254569652e-01,-5.897576147674848945e-01,-2.397830480271298192e+00,-2.495209820399826750e+00,4.455774218736996417e-03,2.135666466920343343e+00,5.503827401310089407e+00,-2.574279522444244961e+00,4.861401535483356184e+00,1.201085947131324438e+00,2.655449252937117421e+00 +7.374741802790536482e-01,2.322398825206866135e-01,-6.388607919241395816e-01,1.558201954142270695e+00,-1.169242667594004637e+00,7.156079652442062233e-01,1.209797746695326470e+00,3.280608636259185751e+00,2.433976210884714053e+00,6.860566217677655843e+00,3.428522783072619262e+00,4.354001919376115026e+00 +1.950582162144144371e-01,1.650484120201193838e-01,9.187050848428088834e-01,2.397232901981297459e+00,8.846211368723222446e-02,1.915130839190936074e-01,2.395860524137094050e+00,4.892194719829085159e+00,5.158820345845165356e+00,6.636110962149698977e+00,3.735949827975017179e+00,6.185136809968994065e+00 
+-2.166162878878472142e+00,3.580732411560659245e-01,3.184832216671074079e+00,1.162138682750387453e+00,3.518870334769913510e-02,-1.993834323159580979e+00,3.195440828486001195e+00,6.371795030099142387e+00,3.311349987162779396e+00,5.273050129739965008e+00,3.450213330997203176e+00,5.173169406007716020e+00 +-3.083572858198748712e+00,2.049828050997768880e+00,1.369940080228404966e+00,-1.968064312283657191e-02,-1.465151629194636929e+00,4.167797826275298489e-01,3.621458553235387345e+00,5.113646283697613981e+00,-2.572042403930124621e+00,3.535381998201279075e+00,4.275033910035618945e+00,4.819187497449219038e+00 +-3.759345714741513689e+00,3.866920217978569774e+00,-2.103677830170176843e+00,-4.413827482702351035e-02,-1.884494713061559690e+00,-1.664631160098111740e-01,3.824917394299513163e+00,6.739145715447541818e+00,-1.224305480377220867e+00,2.002436272864535738e+00,4.451854545236884952e+00,5.366281616039342950e+00 +-4.578524685542856076e+00,1.778297781524767585e+00,-2.342573708673763022e+00,-1.198012191420462447e+00,-1.087814814573651923e+00,-2.086338604224302173e+00,4.904593866914863654e+00,6.667415099684595781e+00,3.719124336327231717e+00,5.253414586500423589e+00,3.973608202969440839e+00,3.507768606291574276e+00 +-2.825684221751323832e+00,1.017035095562613956e+00,4.304519462972637434e-01,-1.308161149994459205e+00,1.808840797870643780e+00,-7.167306005669666913e-01,5.610262983927929881e+00,4.565759051248619471e+00,6.723464872045028073e+00,6.383075616093323745e+00,4.233965708217839463e+00,3.808504549699757824e+00 +-1.019503719482324655e+00,4.065875506696979613e+00,1.787319612937320779e+00,3.785871797822473184e-01,-4.859598417414792149e-01,1.860761205100296634e+00,4.634366340916597160e+00,5.953031621103274773e+00,2.001161800363862930e+00,3.602337542990413866e+00,3.525568024920475274e+00,4.793193174110071730e+00 
+1.786485900516896486e-01,3.602429925837668634e+00,-3.578127110716021464e-02,1.737775079834363190e+00,-2.223910956932633187e+00,-6.312674771699231524e-01,4.791120446589096638e+00,6.647638521125486477e+00,-3.934713074554867518e+00,3.509959809463994773e+00,2.288498110193317281e+00,3.623256074928376869e+00 +1.377030533515761768e+00,1.868948230854070580e-01,-2.055257803764826807e+00,3.105518687464257521e+00,-2.031096529580536103e-01,-1.283138892990953739e-01,4.735750670689223618e+00,6.241461733830655056e+00,-2.176226623923147585e+00,3.986486597268589449e+00,2.201362078044586390e+00,5.134844960398845970e+00 +8.802257136493027989e-01,6.991610167673090181e-01,-9.424059896640493150e-01,2.691141223314705844e+00,5.549848900595336643e-01,9.033152868585698769e-01,6.019697038791218979e+00,6.081796153028275143e+00,3.477088450765858596e+00,4.767849061984209769e+00,2.942893996062047268e+00,6.421923977825514385e+00 +-1.381855267987889269e-01,3.358675920631862155e+00,-1.269793476586484893e-01,-1.901853822405959127e+00,-6.386590191962148166e-01,9.989271146097631338e-01,7.635877472799798227e+00,3.757307194184470234e+00,4.359092498585787290e+00,4.700958837177121374e+00,3.883273996083135060e+00,3.670302715800173754e+00 +-1.242603731882978346e+00,1.179047161150432732e+00,-6.655607401898910869e-03,-3.935150388695540347e+00,2.071500264828483040e-01,1.416157323149925595e+00,6.199934806643138785e+00,3.645732419870664209e+00,2.646933395806208011e-01,3.664391908397746533e+00,4.501513758231745932e+00,2.208611795199703742e+00 +-1.980575304377616641e+00,-1.023312785625449362e+00,-1.076612247841705283e+00,-1.100154803106146773e+00,3.105740538097447878e-01,-1.219956835939274731e+00,5.805317603722761177e+00,4.728494949076917919e+00,-3.770431797980792865e+00,4.421206940584859524e+00,2.599085846756127083e+00,5.651612737237668327e+00 
+-1.516710654879326103e+00,-2.323655834268083531e-01,-1.979048757027759287e+00,1.308402345296668212e+00,4.294516415566843603e-01,-2.959675111221925503e-01,7.029773949233700137e+00,3.801580229547635970e+00,-1.745096512269221023e+00,5.483005158758310849e+00,3.949109605587119098e+00,7.534620038935655018e+00 +-1.008203449196294788e+00,1.018038906638927266e-01,-1.753742555218650745e+00,1.038464361603437469e+00,-6.402963602107260010e-01,-6.128757175426360337e-01,6.356158567751506894e+00,1.796323243505194789e+00,2.363618419278969540e+00,5.121392245046798131e+00,3.177370768889981267e+00,4.039345223032076326e+00 +-1.895911417887634620e+00,-1.353493576124875197e+00,-4.044122617485268467e-01,-1.680421006422703423e+00,-2.941734439100744392e-01,-1.290065374186450287e+00,6.494096475458992934e+00,3.748179576591863071e+00,4.345436684232513613e+00,6.504187099950051731e+00,4.161836780587097273e+00,3.639340715987385177e+00 +-1.010428440734556510e+00,-2.913311355246178458e+00,2.987635913468410231e-02,-3.279635424953385225e+00,-5.459629222529611692e-01,1.379276987703866642e+00,6.519992428468716739e+00,3.821967605340175211e+00,-1.366874441741661173e+00,5.596444265884471747e+00,3.368739435092725110e+00,4.269651927955164794e+00 +-3.836429733507790152e-02,8.992082546887900651e-01,1.676376391647645958e-01,7.869692148824878331e-01,1.483872795671443257e+00,-1.291049843292832566e+00,6.845329173990874594e+00,1.840953010689554414e+00,-4.601262519633495529e+00,2.963292356885109502e+00,4.284365403802065764e+00,3.582885009051824721e+00 +1.133727084981246636e+00,1.667861749572196128e+00,-1.002822474434479805e+00,4.399034951128378168e+00,7.289220369490916340e-01,1.313710382185457659e+00,7.786877049577780596e+00,2.650399780779336911e+00,-2.175503178058279197e+00,4.858432397219559107e+00,3.588870920164624501e+00,3.967739611535218369e+00 
+3.028928456183230367e+00,-2.343504079657432904e+00,-1.735220721547379163e+00,1.502014569001267175e+00,-2.000598529814214821e+00,3.063481019302902730e+00,8.711264538275468539e+00,3.939224384803575507e+00,2.306019998494043044e+00,3.869607166962059530e+00,4.493807489500436070e+00,5.438974973144797609e+00 +1.912884418398078434e+00,-1.017257808087804527e+00,-1.563967274598344614e+00,-5.979828118169634443e-01,-1.818763994069724443e+00,-8.957388571460802584e-01,7.842133100651661692e+00,4.706804462653856547e+00,2.062018851046009793e+00,2.013173749054140593e+00,3.174823864850833566e+00,5.055864408312224612e+00 +1.936123025836812506e+00,-2.881807603031398823e-01,-6.322762488805152570e-01,-2.405318421708155241e-01,-1.117212170935864696e-01,-2.089224422518605184e+00,6.859814222292866859e+00,4.305888056446495860e+00,-1.504652401585079957e+00,2.724891608993822878e+00,1.953893529791669970e+00,5.706103435960124592e+00 +1.618221552723499768e+00,-8.002646754975277776e-01,-1.541544171952438758e+00,-5.183997404836990786e-01,-1.770290809661849929e+00,-7.887086637616680385e-01,5.784084225940442359e+00,3.699792675914003404e+00,-2.693492530826771869e+00,3.368124005382250985e+00,3.150094770688928847e+00,3.256005576777363686e+00 +2.080028156160730113e+00,1.305113373605160998e+00,-1.579242564349136346e+00,-1.031759963327423035e+00,-2.296390069112169030e+00,1.113842869659726453e+00,4.258537816670777687e+00,2.666290978369139530e+00,6.042940022593139560e-01,3.276888245125996413e+00,4.134579687760838063e+00,3.667555485766595069e+00 +2.067576750771584759e+00,1.650518389633325311e+00,-4.526871086243589026e-02,-5.336818663506124327e-01,-4.757304941173887691e-01,-2.213097572202314289e+00,4.381036304112623014e+00,3.610682949304262923e+00,3.614310001038992581e+00,3.483275795388367513e+00,3.583129797372695435e+00,6.389563758440964136e+00 
+3.786017738921322096e+00,-4.475021169250421038e-01,1.094641211652286561e+00,1.816604243070493174e+00,-4.137960824756629630e-01,-6.128853266638312203e-01,4.872969010800280110e+00,4.909625339754276396e+00,2.644042578184392145e+00,4.099073312922418388e+00,3.757909776778407540e+00,6.151163093415396688e+00 +3.483896602799633424e+00,-1.711015863873070142e+00,-5.104956170019500084e-01,2.859627247942449380e+00,6.568479639301256778e-01,2.204250097643947193e-01,3.773568767145410341e+00,5.060001513972095744e+00,-4.371107711584038213e+00,4.335962442428765939e+00,2.918503197900284896e+00,5.090574584299498717e+00 +4.576394942912266117e+00,9.426496451287432743e-01,-3.019032842906256509e+00,1.798851426301030454e+00,1.871164952159827033e+00,2.498610384954699504e-01,4.798982163132473389e+00,5.230029489935797749e+00,-5.885922879260419904e+00,5.282686299998096580e+00,3.374482995803665730e+00,4.608386496870978455e+00 +4.163200436344271615e+00,2.165441207107301036e+00,-2.376162131368675823e+00,-1.205931204947916502e-01,1.637935602027619009e+00,2.460981688298094383e+00,3.263146420542350601e+00,4.191488902458624821e+00,-1.815598761535365924e+00,6.212258418129863458e+00,3.817028694278706613e+00,3.317411050370447256e+00 +3.928017036429528375e+00,1.333673631104955071e+00,-6.458178088186522503e-02,-2.614809517409372397e+00,5.404482446485152636e-01,9.982797489639411159e-01,1.248171468675097984e+00,4.857115197313684796e+00,2.164447559332587367e+00,6.784390463654808912e+00,3.026009329226857947e+00,4.269082725882513252e+00 +3.342461135564160024e+00,1.813969413192768165e+00,1.271570493954519598e+00,-3.241552141702179313e+00,-4.107566534691116100e-01,4.554059273203924230e-01,1.474089932010937964e+00,5.396862418488866275e+00,-1.335624732277507221e-01,2.423500390873731813e+00,2.723503842105398753e+00,5.101459630326847794e+00 
+1.799652716973078714e+00,6.944756218125078817e-01,-4.194005994748716826e-01,-1.083790345331692162e-02,6.616246977677542329e-03,-1.257618932044302751e+00,3.330856688646777641e+00,4.323617273943575867e+00,-3.618646261984713064e+00,1.108704578913731531e+00,2.418932105367774810e+00,5.699769803981489247e+00 +1.672723233610053040e+00,-4.900712705065462388e-02,-2.612987182637256378e+00,3.631112288887047956e+00,-1.299422808443320698e-01,-2.561113511042099944e+00,5.022410746851131869e+00,5.238189687817999207e+00,-5.493461308644601360e+00,5.161521639309377463e+00,1.877491626970969385e+00,4.035544796621502783e+00 +1.611958406255827869e+00,-3.091732562761744241e-01,-2.765880768238859311e+00,2.818067235798883630e+00,-2.986442384781844983e-01,-9.983095628402856025e-01,7.515330779140204420e+00,6.175506103179614570e+00,-1.835135034308897239e+00,4.823193971936775348e+00,2.719501996436998414e+00,4.329517824313737329e+00 +-3.032516886443232806e-01,-5.896004093191058448e-01,2.372919865267560935e-01,-5.532374295282149745e-01,-3.508148009550305368e-01,9.537541883800373377e-01,7.701903546473512385e+00,6.350400019395164186e+00,9.933807002980135081e-01,3.613308445084824072e+00,3.691216874039356277e+00,4.982059011101596546e+00 +-1.337959007393631605e+00,-1.002276626933851444e+00,9.579227200033106904e-01,-1.678868230406534767e+00,1.037460164619667724e+00,1.764842439121176332e+00,7.461032443895633470e+00,5.762777110943723358e+00,5.819435959838310524e-01,2.926436880405554941e+00,3.826542669273640396e+00,4.704638029968001511e+00 +-1.233022460530211362e+00,-5.743711226531944547e-02,-1.380807697986605742e+00,-3.842982498171382044e+00,-3.134900402257079222e-01,-1.635880325408218816e+00,6.701998499399708820e+00,6.226440851859729619e+00,-2.691285694757134195e+00,5.804829449411999853e+00,1.525138823745269434e+00,3.493815860942671492e+00 
+-1.865205496337602575e+00,1.431710762843490192e+00,-2.718995054907896503e+00,-4.593443294069448513e+00,-1.383122156873124453e+00,1.309052096583116498e-01,2.337293587335182110e+00,6.260320734918286334e+00,-2.818298318179963680e+00,5.260742496647727862e+00,3.769612237166617419e-01,5.288363853086943678e+00 +-2.206628938695722741e+00,1.551514493449069221e+00,-2.413132556106665660e+00,1.178316695476375520e+00,1.457335410974212042e-01,1.204115957202990028e+00,4.698235367745073887e-02,4.896590014458261031e+00,-1.378430015980649515e+00,3.170740511370717662e+00,2.494619471374079023e+00,6.962789005562388667e+00 +-1.860135976104381861e+00,1.798906615382426466e+00,1.434350091326882515e+00,5.291584491944156809e+00,6.768586675674830300e-01,2.077446917403265036e-01,2.813578563378360542e-01,4.655291061699078803e+00,-8.382086229604484018e-01,3.621502355422674846e+00,2.593844517273805383e+00,1.607291002811817915e+00 +-3.124042680743388534e-01,7.577307867890256121e-01,2.121353399933312289e+00,1.350895633914931615e+00,-2.789513697782860513e-01,6.753067582859945128e-02,5.177064883997042344e-01,4.187754565173992916e+00,-5.103936229277223902e-01,4.061669028788871927e+00,4.315741458371293859e-01,2.824313347012796793e+00 +1.130011797480357849e+00,-8.391072593324180318e-01,-4.237611371298914964e-01,-1.933216574495730145e+00,2.742387868777406146e-01,-2.994752019428656986e-01,2.576689234901710979e+00,4.197275424097892227e+00,-2.993473679205940208e+00,4.366762917006225209e+00,4.797295118434252714e-01,5.492514421382309386e+00 +6.013461290273438564e-01,-7.688129699543494455e-01,-4.897451212908876528e+00,-2.480162142973378891e+00,2.630291790748110259e+00,-6.105555302220355696e-02,2.831441988274062460e+00,4.603751886326595155e+00,-4.760866155975501002e+00,4.405816125861661270e+00,3.255181258612026785e+00,4.946983860710324699e+00 
+1.826389000794743867e+00,1.360554260325545606e+00,-2.665441211971079749e+00,-2.956296864889385922e+00,2.293554557599212451e+00,-3.452188178759938442e-01,4.107295149898662956e+00,3.829840697084754275e+00,-5.231379965522780395e+00,3.965474815393267960e+00,4.403908204209739274e+00,3.258035887312124679e+00 +1.394651957441011003e+00,2.188037568200167815e+00,3.918724289346349199e-01,1.396245913849419318e-01,1.029139151119064621e+00,3.966988293802046206e-01,4.213206276829068564e+00,2.317514670608773919e+00,-2.288681983437578005e+00,4.481009799782624370e+00,3.195088291628811916e+00,6.864843505746224928e+00 +1.317933650233626075e+00,6.196489704735803627e-01,5.453711560110505729e-01,2.872940919956571637e+00,-2.861319974669730426e-01,-4.446493412264039824e-01,5.341245468866937074e+00,2.402731574217447097e+00,-5.277416786616644373e-01,4.779774807980386342e+00,4.143791484593146279e+00,5.334646907298597363e+00 +2.078369651602372858e+00,1.055758634535370888e+00,-3.462383431273309942e+00,1.678371156089661920e+00,1.137865298635378331e+00,-6.250099977772416437e-01,7.340207577136370176e+00,3.994784883241460527e+00,-1.744308929395891106e+00,3.878048144569627986e+00,5.605253434021880032e+00,2.521198941506532343e+00 +3.149956991405946649e-01,3.999108922925003196e-01,-3.508493449203123227e+00,-1.183807484020850787e+00,2.048243675682622555e+00,-1.041283196264988886e+00,8.555754908569184636e+00,5.741224396017981846e+00,-4.554342668069113031e+00,6.039359007999831164e+00,7.062085973272534289e+00,4.566203609589495116e+00 +-1.335484915511099269e+00,-9.496044867575319515e-01,-2.101464349861160485e+00,-6.019396382394651690e-01,2.167658385589843029e+00,1.143199269311364397e+00,8.813808214112786033e+00,5.019559988327137567e+00,-6.275070551017202902e+00,6.788630174725600597e+00,5.175258844591393625e+00,3.385675944182517050e+00 
+-1.762311053362322522e+00,1.495196654900621702e+00,-6.786751093886456143e-01,1.226921069235455075e+00,1.144931843234056057e+00,2.092742335952448407e+00,7.687264647442435539e+00,2.181909398040426940e+00,-3.808404812229319703e+00,3.953596456987713559e+00,6.473631774814190720e+00,5.310460369457939755e+00 +-1.242235850301086186e+00,4.067472821829558605e-01,-1.266287029602956249e+00,-4.473018400253332416e-01,1.520755709265585232e+00,-9.731500771403120975e-01,5.208368016111155185e+00,3.098806325055673216e+00,-3.095765099554668609e+00,4.673366002556873511e+00,7.936264835799946127e+00,6.124787052766116346e+00 +-1.047907593483522470e+00,-2.688763226369760506e+00,-3.113089820761620885e+00,-2.512693119715514456e-01,1.846380876804381899e+00,-2.282894962031603470e+00,5.906650769498868137e+00,7.191220081857573554e+00,-2.519878654915277849e+00,6.553610623994441831e+00,8.380986906793040703e+00,3.584350310865875411e+00 +-1.844627464125474514e-01,1.122766995630273268e-01,-4.121025287637389845e+00,1.797875294197349039e+00,1.585009272754673182e+00,6.660819244520835847e-01,6.893875111118498822e+00,6.874602615116323534e+00,-4.767729824381188308e+00,6.979311980368485102e+00,6.773564231786926904e+00,6.240392836728278603e+00 +-1.553126461549380721e+00,1.428128698225768156e+00,-3.387762219870576352e+00,-5.811216978785012088e-01,2.980713220670674346e-01,1.450360386619919950e-01,5.111534095669602706e+00,4.052598079611519744e+00,-4.344976218930746370e+00,5.539882879186871278e+00,5.024728618775607281e+00,4.627643543827098860e+00 +-3.994786779790023612e+00,-3.030500860844555167e+00,-1.574365772133773156e+00,-2.334190637396901913e+00,1.041007653499790853e-01,5.435714176373549478e-01,5.284802151532128178e+00,5.099647470951653716e+00,-3.371047890540605607e+00,2.211015009308235690e+00,6.168632169582965119e+00,4.081638605238643080e+00 
+-4.951401337488436916e+00,-4.281597413464874435e+00,-1.446965890712373914e+00,-1.532118915967320305e+00,9.840892849850388924e-01,1.695384535337644216e+00,5.045181307419701611e+00,6.578181144299732885e+00,-1.598630209145174952e+00,2.396734116344834131e+00,8.386117100329082419e+00,6.105214003038710757e+00 +-3.768335176734624703e+00,2.821499338348825159e-01,-3.841967183343977688e+00,1.219078443502037201e+00,3.382258767711604719e+00,-3.293032198175086522e-01,5.688952638614140689e+00,5.508928573275909457e+00,-3.851911527026878446e+00,4.279345597822666214e+00,7.042749744856436678e+00,2.811021302569785441e+00 +-2.000378725346092956e+00,1.721205612915869709e+00,-4.147184173872242141e+00,1.613977079281302185e+00,1.245829365421335932e+00,-3.110864252931751506e-01,6.006974816112244930e+00,5.176304910092258460e+00,-5.261539750925154024e+00,4.346664796174861678e+00,5.904321977862432291e+00,4.917188698982512207e+00 +-1.529114688558853796e+00,-5.520735524876594358e-01,-2.656566963244263668e+00,-6.190376217729831598e-02,8.875057108964318209e-01,-1.866651543240889777e-01,4.284550674581560692e+00,5.405435297673074402e+00,-4.775343473267573913e+00,3.799120905352531885e+00,7.318339719949468858e+00,4.545836912879165581e+00 +-1.382077426139134202e+00,-1.460745776425037112e+00,-2.296335121286493530e-01,2.238441972820929271e-01,1.636529721440337148e+00,-6.973505035377318695e-01,4.318230763979286735e+00,4.610885275325963484e+00,-4.158230463276117561e+00,2.544854833773063874e+00,7.518444240314048344e+00,2.399475115996510866e+00 +-9.776006139457309452e-01,-1.267066756597080346e+00,7.938082447649490447e-01,-1.413220720731109825e+00,2.171566498567953474e+00,-2.290059544218771670e+00,5.087184242756512376e+00,4.886844020442945968e+00,-2.652016073078574010e+00,3.729252890668482756e+00,4.806585388871521758e+00,5.511388409605797456e+00 
+-1.302065558268684642e+00,-4.226338953756407224e-01,-1.123615705427958300e+00,-9.783303329218313582e-01,1.007122282374927735e+00,2.021121699101357194e+00,5.785845498026464639e+00,7.340906335593323284e+00,-3.094773528188477840e+00,8.037933946174717903e+00,4.708212886471146774e+00,5.125970724634678177e+00 +-1.605904370796717107e+00,-1.232593778691534192e+00,-4.337633722520342516e+00,2.543933438651938550e-01,2.444625552455861772e+00,1.588729289098186337e+00,3.946041794826312454e+00,6.208758917985891301e+00,-4.361938518167196577e+00,5.252633179210798531e+00,4.261297571469822465e+00,3.252001104075056492e+00 +-1.367407857803577098e+00,4.737473641267300195e-01,-2.935007695235575476e+00,-1.540156451526306203e+00,3.867695856989417535e+00,-2.178328585195230538e+00,4.309242335980648875e+00,2.485156214386875728e+00,-4.796033594615050077e+00,3.794170355583785792e+00,5.729969752339677314e+00,4.005477881434223342e+00 +-2.255653640435242302e+00,2.505874133578036123e+00,8.426518445301720006e-02,-1.024343935297204577e+00,2.751911341281242152e+00,2.095966177392705743e-01,4.137847859460531019e+00,3.843512617322950575e+00,-3.612947540317435369e+00,3.221189136472135672e+00,5.671551942901556842e+00,3.375065468211977837e+00 +-3.921030993293422640e+00,1.630507909858668614e-01,9.695086723449181099e-01,1.016718128066200100e+00,9.242379484877117912e-01,7.640466204237239545e-02,3.887114122371945868e+00,7.938872834349187890e+00,-3.930522720000166004e+00,2.642365263037671497e-01,5.839611272192583691e+00,2.237288727564263002e+00 +-1.936331010084744397e+00,-1.525296869443792058e+00,-1.887927094574152198e+00,2.889296467732928253e+00,6.674888846926975150e-01,-2.226209497065793741e+00,5.147626517807525559e+00,8.101203583561597199e+00,-4.036529054131140271e+00,1.460043503492038042e+00,4.555431949381168799e+00,5.518934066493400792e+00 
+1.089177159792090732e-01,-1.480781935838115793e+00,-2.662620779159473372e+00,1.034614556451587841e+00,1.577832789279951964e+00,-8.318449178559828017e-01,4.720014009671788635e+00,6.953736907057773919e+00,-4.647597512732412284e+00,5.850995166966205474e+00,3.801198928568325641e+00,3.967097800413919018e+00 +8.857813357540065269e-01,1.626430617356564579e+00,-1.705199505776182178e+00,-3.030289740963192813e+00,2.396803525729378048e+00,1.417714739066025853e+00,5.888658474261237785e+00,5.147574616784825530e+00,-4.378121367908962114e+00,6.743064673446346191e+00,5.278554693669795128e+00,2.382535524532037652e+00 +1.379769462219410325e+00,2.641558084995923039e+00,1.825251076082317070e+00,-7.221922592021288168e-01,2.285832806744357182e+00,2.454150858434434923e+00,8.429248908043021515e+00,5.832642052619546824e+00,-4.493401982967586861e+00,7.640750679033470050e+00,7.192202538343089202e+00,3.930856752560546674e+00 +2.535850865204160254e+00,1.747903449949555599e+00,2.352021208472721003e+00,3.861460703360300784e+00,1.219586203910400179e+00,2.687019828144873168e-01,7.327886179081453122e+00,5.419198983552933058e+00,-3.686637380756415894e+00,7.427129093163848950e+00,7.238967956017155814e+00,5.079134620182847115e+00 +1.918686482430989937e+00,1.901521804749370936e-01,-2.233122763915311726e-01,3.527070754360397853e+00,4.868582213573212636e-01,-1.998178446545642029e+00,6.021843205344657335e+00,4.060183028189632282e+00,-1.173286360505835901e+00,3.527551104234932566e+00,5.874570741275926267e+00,8.095694785062841348e+00 +9.834708323740255498e-01,-1.767532034523959350e+00,-2.275029374245872038e+00,-6.330186885629787152e-01,4.817738872638805958e-01,-1.738891492324635024e+00,4.938549126197735006e+00,4.321312469822547797e+00,-2.291676683720838881e+00,3.359429581762446659e+00,6.659601894279155054e+00,4.369361129868212679e+00 
+1.130922016234996885e+00,3.501437177607986673e-01,-2.197358340644369745e-01,-1.733438673028740329e+00,-4.798647781829540326e-01,-9.449763468903299435e-02,4.346695488232399818e+00,3.764062693634334433e+00,-4.867940086837688085e+00,3.469323980260952567e+00,8.250621485366245622e+00,3.199225863983878870e+00 +1.987774937541078035e-01,1.621324503807580308e+00,1.471824066182199298e+00,-1.574909450028733060e+00,-1.444874973655196015e+00,1.828513539814531796e+00,4.486520860130791988e+00,5.335209395860639248e+00,-6.103478888992682982e+00,5.295324495597192538e+00,5.824635096502237985e+00,5.422301366413537771e+00 +1.181252098985606347e+00,4.134968344910459059e+00,-3.613664434624896948e-02,-1.855481006435167224e+00,-1.404220683081923049e+00,-6.819517137232335990e-01,5.418137778231014501e+00,7.113217943178534064e+00,-3.589890359574852141e+00,8.317154553906529912e+00,5.537201981927316830e+00,3.586261854092550827e+00 +1.856101904956906079e+00,3.925455888329150156e+00,-1.552381011240769881e+00,1.217705537328990228e+00,-5.337796791751940884e-01,-2.701922740527604727e+00,5.226323506836971333e+00,5.226959111586891638e+00,-4.693813412962244414e-01,6.616116507490186649e+00,4.891452878561917572e+00,6.033164740847288599e+00 +9.164264286591120579e-01,9.674231528967295324e-01,-2.080074393406135425e+00,2.693222771650090053e+00,-5.630122689619707632e-01,5.148341904779039080e-01,6.093970864738022186e+00,4.375989109329739613e+00,-2.262852418509285535e+00,3.253090895327358290e+00,5.143223292101271049e+00,4.424456203474296245e+00 +6.455961493516636507e-01,1.280927382243075563e+00,-1.905045654688749313e+00,1.415314383325836811e+00,-1.366699782267320584e+00,1.488353364690308878e+00,5.993014095948725029e+00,5.663699500937534381e+00,-5.375949927543590690e+00,4.127864241623477426e+00,5.749506903894103793e+00,1.749369021285917647e+00 
+1.579146930776830771e-01,1.672634254975262635e+00,-1.481153727250524854e+00,1.130489333255395579e+00,-8.576220968497062502e-01,8.751866702420707522e-01,5.507422646264366506e+00,5.434873638195526269e+00,-5.476225810067839816e+00,4.600462707919646732e+00,4.738172189305659643e+00,6.056550584723551012e+00 +-2.909607925415798046e-01,8.724720745357823493e-01,-2.368999111916265665e+00,-1.715903225118668995e+00,-2.148380708258881278e+00,1.183998800131415452e+00,5.349573112733391866e+00,4.165787047009397170e+00,-3.323558655156207742e+00,3.223375661647526425e+00,4.119277002685489464e+00,4.599970682298828883e+00 +-3.306645713722670132e-01,1.909979759437071323e+00,-2.031465225505573180e+00,-3.587823934881999133e+00,-2.860600747815385425e+00,-1.987472501142117043e+00,6.256079522580288099e+00,5.830789907284640528e+00,-5.847171202702305948e-01,2.975383640935380214e+00,5.490481902136949977e+00,4.387879926982361845e+00 +4.263228177411126918e-01,2.157561582795937127e+00,-3.316935045948691041e+00,-2.940545435776954264e+00,-1.864903705361835806e+00,-2.524928902417629573e+00,6.628072144779578068e+00,6.395597652102816610e+00,-2.696110597254691044e+00,5.952264941109109309e+00,4.257662994441342441e+00,3.703930638641808493e+00 +1.193366732033157351e+00,9.826325589930731130e-01,-2.516527984552007879e+00,-1.153359427414173410e+00,-1.706924925159805539e+00,9.752757465543320237e-01,6.304456315745787798e+00,5.489217295081757264e+00,-4.712329429705228279e+00,8.394840572117590938e+00,2.978300494010344579e+00,3.193680706741855779e+00 +3.602778714187880871e-01,1.836253113768347323e+00,-1.211580301569616491e+00,8.376741731740589003e-01,-2.925237604631134136e+00,3.386650484928460436e+00,5.494203647555167436e+00,4.661456842424625435e+00,-4.605837326283063859e+00,5.816965219020975297e+00,3.351541411691269179e+00,7.093379900479444444e+00 
+7.232824621865587833e-01,2.360273753448378642e+00,-2.119738764212124593e+00,3.166679016710596706e+00,-1.933442555463390278e+00,7.646785509822175975e-01,5.160871861802657889e+00,3.943309158431016037e+00,-2.819566607777576728e+00,4.778156170584293427e+00,6.104584871203494245e+00,3.806052101117367137e+00 +4.895188127295139746e-01,1.598400661269894441e+00,-4.059440643799565152e+00,1.761520509834790360e+00,-6.930824119684195761e-01,-1.602894020209125081e+00,5.354020572274483136e+00,5.675753062682611372e+00,-2.074769113012789212e+00,4.534992326556606557e+00,6.374140926240183802e+00,2.995667537325232388e+00 +-1.417146712806542030e+00,1.162540749823484543e+00,-2.821661381645870215e+00,-1.864701162965590076e+00,-2.148300520105935973e+00,-3.204283294137773574e-01,6.828004225253447856e+00,4.848636180667225126e+00,-4.807445855863754502e+00,4.322166784119967353e+00,3.357618936239717744e+00,7.477707704377309028e+00 +-1.223100535472626760e+00,2.399805049786160716e+00,-1.881675152841980392e+00,-2.051458864990816977e+00,-3.036353204927186411e+00,9.162349161972951350e-01,6.747155584887854829e+00,4.424131394752608415e+00,-5.172862866739347254e+00,3.714503085883650080e+00,4.530739860216478121e+00,5.588621204537695952e+00 +-2.331620117532256842e+00,3.091363422104554726e+00,2.498570506946000691e-01,1.156413474997386004e+00,-2.338453850701846193e+00,8.381982775322296408e-01,5.346875066248663266e+00,6.416893309367025289e+00,-4.329012813825567463e+00,3.400418611958983206e+00,5.652205932632920593e+00,4.984056855741751058e+00 +-2.663511555637593009e+00,2.035013611904984643e+00,2.174964038665827548e-01,2.002099711433645624e+00,-2.770177392028111640e+00,-3.737854810035729503e-01,4.668187886431102385e+00,4.793161363316138512e+00,-2.160875966280079830e+00,3.039640789342107574e+00,5.039753264893064078e+00,4.928799220242923163e+00 
+-1.080341765991820191e+00,1.594274693509639906e+00,-2.597492751680841927e+00,-5.159732943759433033e-01,-1.862695010933665074e+00,3.223788650687365864e-01,4.396700210807001241e+00,4.087088907698947615e+00,-2.788140969679981040e+00,2.753411555394968069e+00,5.138471736541990964e+00,4.463078872331661451e+00 +7.757674270661427673e-01,8.185867493298183417e-01,-4.025774562613177565e+00,-1.778381660850697932e+00,-3.344381828802922119e+00,2.261965511720036570e-01,4.697408133444811895e+00,4.072430739626219598e+00,-4.819173401705638682e+00,3.401845223177137001e+00,4.451556870688682999e+00,6.392728433143465949e+00 +7.611104818252110160e-01,-8.286425486551746156e-02,-8.714099597121953700e-01,-1.392322392271791331e+00,-2.010274344700349047e+00,-1.187859749674115228e+00,4.220067923014012123e+00,4.198593555867003779e+00,-6.339609891012742082e+00,4.219375112814423012e+00,4.609282730777004566e+00,3.743959361957657883e+00 +6.790603894908597837e-02,3.587434096184727306e+00,1.621506077846058824e+00,2.664330740287865318e+00,-8.178962447624227305e-01,-2.098826921279402513e-01,3.445800902112551078e+00,4.990575469785755836e+00,-4.415085684552426670e+00,7.354874196964784616e+00,3.987913881285179318e+00,2.078377777453091824e+00 +-2.114987211631109421e-01,5.188606737658716384e+00,-1.843969134213084282e-01,3.702957121339581548e+00,-2.006425116542370546e+00,2.737147109700642122e+00,1.169552537800597314e+00,4.655034887063085769e+00,-2.251466020449351291e+00,7.997480528203591454e+00,4.994447217168600517e+00,5.452196236741163382e+00 +-2.044273108467230760e+00,5.835049444426541054e-01,-6.446928228212237322e+00,7.850216573166129974e-01,-1.431515617724336220e+00,-8.556040301668454973e-03,2.805575485367272748e+00,5.545649097145647133e+00,-2.639006556732359954e+00,4.314394547457116147e+00,6.591809101603595344e+00,5.870140067503667680e+00 
+-1.101726457346017263e-01,-1.620274964016493735e+00,-6.641148523763777511e+00,-1.094626423534233561e+00,-1.244275022996923497e+00,-2.347922786427476094e+00,2.986861694153729374e+00,6.013670007454654964e+00,-4.749572626239833895e+00,8.989575797021882586e-01,5.871049258764507783e+00,3.692157962858390707e+00 +-1.355492099570395625e-01,-7.710785816490806077e-01,-7.460042509833899604e-01,-1.431969779711421920e+00,-6.658354460507526218e-01,-5.212556798169548644e-01,3.640715501686690558e+00,4.199800461076788949e+00,-6.923924389413910419e+00,2.409415786856320629e+00,4.723429037241177575e+00,2.118230114873473902e+00 +1.066566345600753474e+00,-7.877368489464271484e-01,2.450416683951246011e+00,-7.293212181132470162e-01,-1.831209305352096139e+00,2.128019456441309909e-01,4.630997173162287872e+00,3.153504864869932867e+00,-5.121705788364115719e+00,4.961928494411546708e+00,5.848549132384527560e+00,4.270064162359655136e+00 +1.193425803725096213e+00,-1.896760202919263172e+00,-8.562759139084447080e-01,1.806142938563724432e+00,-1.942094921675173502e+00,1.854619225282396755e-01,3.595501587468281190e+00,4.159012662003547689e+00,-2.807186266658454166e+00,6.385983535919036669e+00,7.004357507056047538e+00,6.747016245236693521e+00 +2.083761271791886305e+00,-1.276720735151101849e+00,-5.479757520210990052e+00,2.606765689959579024e+00,-1.441777419428687645e+00,-1.139184854626122068e+00,3.426172540266504640e+00,5.032483969662179923e+00,-1.070486597603297341e+00,3.698943296325017283e+00,4.604898685711097173e+00,5.158872725905403023e+00 +3.913190663892308852e+00,-1.838204721789715279e+00,-4.340440974661143869e+00,8.709417064315090329e-01,-1.368377580730799625e+00,7.857745914260003683e-01,4.206649879981476658e+00,6.144795896611343622e+00,-2.978755867683346104e+00,3.100088655314563901e+00,3.913506554336225296e+00,3.186174085507160303e+00 
+4.353600527808068499e+00,-3.456748851778082887e+00,-9.091876232287365855e-01,-1.666746966739525160e+00,-3.835541755542447362e+00,7.176915106309048609e-01,5.999160702075048590e+00,6.123954378276296850e+00,-3.942832587262437016e+00,6.505938216379314909e+00,4.494098247601695739e+00,4.785774580268421730e+00 +5.751111549400718559e+00,-2.606365152484285463e+00,2.712259588367513796e+00,-3.002823022246964069e+00,-2.501744939896655584e+00,-5.772774549715808945e-02,7.052118926317236358e+00,4.556194363128761360e+00,-4.652746919986393337e+00,8.969322605193864817e+00,3.338448377151302715e+00,3.847965444282013703e+00 +4.412564108108409044e+00,-2.197886789007060138e+00,-5.546879011168117657e-01,-1.945269973105419936e+00,-1.544695102302104939e+00,2.213139708179573395e-01,4.681231933766181186e+00,4.183559885287595037e+00,-2.610332668439564685e+00,7.894978062034469168e+00,3.369021545659768080e+00,4.544912566977936663e+00 +3.799001718987409681e+00,-2.137054923626828806e+00,-4.457079794441980880e+00,1.226395757840899492e+00,-2.030637715223882367e+00,8.683575779812516271e-02,3.111532903220852297e+00,5.466003107294980623e+00,-2.524675485755766502e+00,4.537313889249457333e+00,2.228549041068663694e+00,6.596994726986956437e+00 +2.235099628414229045e+00,-1.228748151889619278e+00,-6.116059378706807159e+00,2.226761373624125628e+00,-3.167429537191251754e+00,-8.899723588492577941e-02,2.739760515153622400e+00,5.488751323872037347e+00,-3.267075187526824465e+00,3.180835210134046687e+00,3.677150338628789150e+00,3.917626231455117569e+00 +1.584180587095145309e-01,-7.852655966666166609e-01,-1.812135908497642056e+00,1.811381571465613849e+00,-3.868721454666545156e+00,-1.305486931701104858e+00,5.212141177367930922e+00,5.083612625330649948e+00,-4.227537965591766422e+00,2.436317686035841312e+00,4.677542309159430367e+00,2.750568805364736136e+00 
+-7.766652041322625877e-01,3.440854525018117727e-01,1.291619621175254196e+00,2.833256115689901211e+00,-2.566560115440099654e+00,4.448779037142116533e-01,4.720370609089600933e+00,4.390812785928678963e+00,-4.009024438940519630e+00,4.226275748530937015e+00,5.640016549047624395e+00,4.977936640280186786e+00 +-1.666461426108118760e+00,1.785540568414713203e+00,1.805973667163953467e+00,3.702551833821284832e-01,-2.318949619335108014e+00,2.193260776215153474e+00,6.501093851038027616e+00,4.097104561187784988e+00,-3.380629817883459509e+00,4.993965235226390575e+00,5.038063111393355165e+00,5.840642169186212485e+00 +-2.410407352193815633e+00,1.827937464828485847e+00,-2.753275686170964853e+00,-1.916569669564210932e+00,-4.188875821867545923e-01,-1.434728816777196769e+00,7.222656113864077909e+00,3.863332368140850015e+00,-3.557266307242549885e+00,4.348824044871342664e+00,5.264486958202707179e+00,4.345164796256563378e+00 +-1.666239550604287967e+00,-6.080677676092067774e-01,-6.016779562497914569e+00,-1.597834161041584933e+00,-1.273227400451313995e+00,-8.484560339955301567e-01,6.599063039814783593e+00,4.026283348567725184e+00,-4.751951651617570604e+00,5.398147519999094257e+00,4.362588794554010718e+00,4.096358997545670455e+00 +-1.655180023490592456e+00,-2.622239271382690973e+00,-3.750602427130763861e+00,-1.738716375420507498e+00,-2.284581807216647942e+00,2.062685576388656017e+00,5.465498868731979876e+00,5.571345962541135677e+00,-5.342053777125769898e+00,5.617987145165654006e+00,4.189733685873055613e+00,3.577745219813466715e+00 +-2.004071539620203346e+00,-5.295128212567419279e-01,6.047322567267381377e-01,-1.965503770718391552e+00,-2.096982664784231787e+00,1.269103252111400681e-01,6.751060902271214914e+00,5.910331031458312623e+00,-6.200308696641878115e+00,5.715923284776543767e+00,4.030748268924964073e+00,4.600455274492318836e+00 
+-2.074304412279668330e+00,1.360712017343177704e+00,9.251475424004992343e-01,-2.383440075185936924e-01,-1.083413596973536563e+00,-9.697599544848316544e-01,6.441411052097585355e+00,5.461601793406312133e+00,-5.208872431540394032e+00,3.472725521286179617e+00,4.326691175634730158e+00,4.200401906248015571e+00 +-2.612709753918561972e+00,-1.702468264429592981e-01,-3.373551225247852337e+00,7.280554488963433268e-01,2.319313567379491792e-01,8.094720158030519475e-01,6.446830655778663655e+00,5.538956232612101793e+00,-4.981796819407129817e+00,2.406619465325531948e+00,4.497009142654490255e+00,3.232925210389196558e+00 +-5.287188450741622248e-01,-6.095815948953714480e-01,-6.264637216247121998e+00,8.323919370189147848e-02,6.473154449270843713e-01,7.835103380678738461e-01,5.984142948224764780e+00,5.229714827305749658e+00,-5.223713406750629673e+00,1.843460253666439286e+00,3.559375867868571408e+00,5.720746949205340215e+00 +1.427558787308593535e+00,-1.557302632852493318e-01,-4.597806285564792539e+00,-1.317988961575996765e+00,-2.188784664657661416e+00,-1.067990096138581313e+00,4.565907732054268031e+00,3.579763228869906833e+00,-6.308078353552398099e+00,3.078447130025423473e+00,4.832425975578442667e+00,5.011868797950294052e+00 +2.764127317348215929e+00,-2.686501783861953374e+00,-8.781008760933306512e-01,-1.878669373366243400e+00,-1.627916373166508723e+00,-2.543525014225115344e-01,2.474722339252600101e+00,4.017719734625418759e+00,-5.754602740312241949e+00,5.835979325068624490e+00,6.232338477435537705e+00,3.576325938558922779e+00 +1.543226017616111445e+00,-2.610207787022557291e+00,6.607750716330910157e-01,-1.309906651928841148e+00,-1.785712370039657770e+00,1.285798377024816475e+00,2.558548751022986067e+00,5.707837790419183932e+00,-3.563140784313892695e+00,6.480571766391602040e+00,6.253921768985826191e+00,3.881109340114460515e+00 
+1.816779137249906473e-01,3.486292015042394832e-01,-2.840740209899343505e+00,3.379650278242448125e-01,-8.198855256058732444e-01,-7.181831308890092025e-01,2.723290370302909214e+00,4.875980960146384824e+00,-2.913466572202565885e+00,4.859607613288064343e+00,5.097350440448430220e+00,3.619998990410613793e+00 +-4.815905563663047118e-01,1.928041997345812275e+00,-4.815165219484327963e+00,2.363014709460953711e+00,-2.363669136181059116e+00,-1.316525815903691443e+00,1.826417649917365882e+00,4.500876629176228327e+00,-5.308566677002847456e+00,2.653997718300394659e+00,3.362139387239098554e+00,3.670391412629564254e+00 +-7.891240805373735956e-01,3.029472701355132802e+00,-3.758047625023251292e+00,2.288737409872123596e+00,-1.964455413069789991e+00,1.455759453557631034e+00,1.522305917526569630e-01,6.006149330688311139e+00,-7.122756661281025359e+00,3.790575847005788646e+00,2.664256832658303153e+00,3.063278519312641279e+00 +-1.427442697307999886e+00,1.422722195605895079e+00,-3.664943911779358077e-01,6.045914555489940723e-01,-2.351577888029317887e+00,1.028308835477044436e+00,5.875058199155538396e-01,5.281385630745823789e+00,-4.937230596280533845e+00,5.133190913151953083e+00,5.076220016262959156e+00,5.934188040842376388e+00 +-2.056090676664585004e+00,5.489115262117172378e-01,-1.190441230292737185e+00,-8.992542381752988145e-01,-2.054960448025206610e+00,-6.049953351586672134e-01,2.227411624213786467e-01,3.636772975192758217e+00,-3.001759071036719462e+00,5.036880885972359501e+00,5.495733033675128887e+00,5.145021568904277309e+00 +-9.582695723029276413e-01,1.852360103617486153e+00,-3.063441926180958674e+00,-1.953418429471263984e+00,-2.071154166554376896e+00,-8.119869804624716414e-01,1.230599694952299483e+00,4.824080340429043545e+00,-2.341051401377774788e+00,5.870680100001074209e+00,4.469277296861590365e+00,1.075704134029249914e+00 
+-1.582376910602060760e-01,2.189889734009317301e+00,-4.747345179362754131e+00,-3.151931765461428170e-01,-4.509538900066920597e-01,-2.835531601968044879e-01,3.784604545548090115e+00,5.318221682007409967e+00,-6.180781124865308840e+00,5.646261655890590703e+00,5.054598689700086744e+00,3.793705868482696086e+00 +6.959286950578849051e-01,6.932436058180182492e-01,-2.546648440975163297e+00,2.062745429629262439e+00,-2.740780178961054858e+00,-3.269385040430629052e-01,6.430060983904160921e+00,4.100766536154779374e+00,-6.582948750427796369e+00,4.814588188534152380e+00,6.198388469805136580e+00,6.932549558323367833e+00 +1.685347181800349592e+00,-1.541952554884603810e+00,-1.621865943421657352e+00,1.936196794573352253e+00,-3.265755400220438531e+00,-9.629537655635460602e-01,8.370956373988427401e+00,3.038401085353682696e+00,-5.588498992214349315e+00,4.263243556096690590e+00,5.805831702509976822e+00,6.569832060772085036e+00 +5.619516977521019641e-01,-2.315973686782938357e-01,-3.281274591059596180e+00,-2.220697764081181469e-01,-4.628198926143993752e-01,2.151012543333026272e-01,7.745727657241504360e+00,2.685129616860215673e+00,-2.731165510437813637e+00,2.595087672753301433e+00,5.201334641044449647e+00,3.219474986801178940e+00 +-1.349268180125596350e+00,1.655809891178515514e+00,-5.231539015833450712e+00,-1.165785651985337124e+00,1.252610657179511477e+00,-4.897586746070303199e-01,8.129168315619942575e+00,2.922562657270406206e+00,-4.085018491545847397e+00,2.790472003681623292e+00,5.383394530934806355e+00,3.586778506060198879e+00 +-2.100389066768320045e-01,-1.494682972458926340e+00,-3.584572825600746970e+00,-1.401587609857073424e+00,2.251384033992794853e-01,-5.934183954548902795e-01,7.441139800707048835e+00,3.377305662599261460e+00,-5.913173941404687994e+00,4.668216539632784823e+00,6.737978412044866694e+00,6.493094468850287626e+00 
+5.485587735747304849e-01,-2.068089318784516628e+00,-2.243471552217253251e+00,4.381068794323733639e-01,-5.229656391900303625e-01,-1.652041200608006077e+00,7.053254100833267870e+00,4.537533579414166240e+00,-8.793919469542744949e+00,5.559195251967260276e+00,5.876154813035684654e+00,5.726585109741426649e+00 +1.627196506847609525e+00,-5.942359193839552578e-01,-2.204020140251607973e+00,9.430541241391489082e-01,4.904723649761350557e-01,-2.143035632598147555e-01,5.841940752562459593e+00,5.217452086683557866e+00,-7.019671504862639466e+00,4.844290408304894768e+00,4.823209524717708696e+00,5.144027609039376614e+00 +1.046652319512691109e+00,-1.417153156446207873e+00,-3.026556964533265948e+00,5.928952139307764879e-01,1.397674651075489383e+00,1.968044498640865392e+00,5.644068693543877302e+00,4.831472166896567444e+00,-3.728254800048016904e+00,2.836757308576917325e+00,6.056575880500338016e+00,2.951545081531402381e+00 +8.954242013556753532e-01,-6.795137874097308961e-01,-4.097073586783590571e+00,2.247506211259183928e+00,5.553576947692637766e-01,1.975155891931058783e+00,7.154470288911317866e+00,5.087541508707205118e+00,-1.767290320286429406e+00,4.937862198091091948e+00,4.949358407465503262e+00,2.594159642289888712e+00 +2.024594371026763318e-01,-7.194769430688550127e-01,-3.691388889817485364e+00,1.247845108867251529e+00,-5.504762302810080055e-01,2.528715780911335309e-01,7.945662466242399802e+00,4.113873776637209545e+00,-5.456182505782537717e+00,5.985536488560954993e+00,4.227285404794214330e+00,3.250246637205135958e+00 +-1.218874410964381871e+00,-1.758523559724022300e+00,-1.962711156225793285e+00,-6.279759711458275939e-01,-7.583446693541391337e-01,-1.969618325843885298e+00,6.209907318174950674e+00,5.171693078300720003e+00,-8.264793271933067231e+00,6.769228940532217464e+00,4.723855349476290932e+00,4.573308323698332956e+00 
+-2.148197705717832751e+00,-4.649936575244811854e-01,-1.035598528720010014e+00,-1.263971379281882523e+00,-1.440673375841695358e-01,-7.451968988208304001e-01,5.160471704650104741e+00,8.829560451716705671e+00,-8.539634940588666012e+00,6.936680962381640114e+00,3.235429173871032571e+00,4.781272951395972193e+00 +-4.544314759852931651e+00,-8.545989354614917932e-01,-2.071564460370115768e+00,-1.859713842565478448e+00,-4.399036977135762916e-01,2.182765786583092726e+00,4.372573258042296906e+00,6.440370574082606048e+00,-4.042071270042839615e+00,4.212542412812976700e+00,2.781187122013275115e+00,4.479168097457306708e+00 +-4.486765194885099284e+00,-2.890474908541997845e-01,-3.304044548606959175e+00,-3.223399180927806196e+00,-1.384748085255008476e+00,-1.868236729530170503e-01,2.959165170569567849e+00,4.369281965863371475e+00,-1.220165504047935556e+00,1.617402290075530047e+00,5.815718881738203727e+00,4.828299432327773921e+00 +-4.170539930389232097e+00,-7.684023046047840921e-01,-3.367635804336703842e+00,-1.180843434519597279e+00,-8.993294612879940697e-01,-7.435772416320318712e-02,1.629105066454298978e+00,4.767559151104483917e+00,-2.219961471007065867e+00,4.903561968792375936e-01,5.598205773850448708e+00,4.913656571866479084e+00 +-3.523686550790577687e+00,-3.056594155900486331e+00,-2.051846010921448471e+00,4.101145198083533394e-01,-3.884020341704730872e-01,1.523945307494584789e+00,3.781118856231072911e+00,5.366156690733308920e+00,-5.953217837094241460e+00,5.092017483868774619e+00,4.999319843892386039e+00,6.942958589801051517e+00 +-1.231878446629819157e+00,-8.628479855229752582e-01,-1.852650007446889902e-01,4.164919293050345361e-01,-1.818600026041566453e+00,-6.325278618971902356e-01,5.727470622257770927e+00,6.606226639230827757e+00,-5.685092000716645444e+00,8.653686802421903579e+00,5.247871113133442122e+00,4.355589457239776863e+00 
+1.956579062862326412e-01,-1.001402484624956291e+00,-1.648018653455823657e+00,2.434358954106604633e-01,-2.561738949798213039e+00,-6.890056798907447932e-01,4.056488502898882409e+00,6.690025192103396634e+00,-2.331567658168782309e+00,5.552714407733484236e+00,6.007846107145587311e+00,2.146593937367936356e+00 +-1.337066816391997426e-01,-2.047520375519126468e+00,-3.749752336434905686e+00,-3.931332829248479488e-01,-1.077515842127685142e+00,1.542742312933534254e+00,2.580269691490353345e+00,5.901132988743353458e+00,-7.691250593657450452e-01,2.210681287616310442e+00,6.434215027836306611e+00,4.875300410899473746e+00 +9.668771311119847001e-01,-6.273495735455852929e-01,-3.464873707554440241e+00,3.764108608449281412e-01,-9.895920052136639722e-01,1.161514522980211739e+00,3.628069278143565946e+00,6.131490020509800409e+00,-2.807926682618692382e+00,3.781176319494426785e+00,5.644086292870081678e+00,6.295275496957129846e+00 +1.023428666895920269e+00,-1.490947232034514069e+00,-1.063370549377946617e+00,2.724388074533872217e-01,-4.805155746626946778e-01,-1.755875309538191154e+00,3.654881850994777803e+00,4.689417980765189675e+00,-5.528393825976277043e+00,4.667400875369982494e+00,5.661798326079645172e+00,3.049022038497731568e+00 +2.150110557047250293e+00,-1.621289848912258691e+00,-2.964810620807731834e-01,-1.087686635188899409e+00,-9.241544841718278835e-01,6.180224836607822736e-01,2.664532191692702057e+00,4.131871114110043486e+00,-5.334562974188928663e+00,4.024744383922982394e+00,6.436296956384975232e+00,3.522493084638647964e+00 +1.266245098542744252e+00,-8.409693056241449582e-01,-4.467070844934681872e-01,-9.450716491339776759e-01,-2.703589839154058039e-01,-1.247291591611538131e+00,2.352935829618759911e+00,5.294486498515911244e+00,-2.184654645284031638e+00,4.832681676656637393e+00,6.616047156565080023e+00,5.649768473177917194e+00 
+1.256567595988526742e+00,-5.209123436247663497e-01,-3.554136627479212329e+00,9.851981865032901631e-01,6.066463566938560259e-01,-2.551671700953049182e+00,1.672367682222062335e+00,2.991517995066215185e+00,-1.891614166973342392e+00,5.547414844407583701e+00,7.085151694555714030e+00,4.583695315487106292e+00 +2.417146698983693387e+00,3.423420420221746086e-01,-5.468012133391135343e+00,1.912905303796538226e+00,6.560968323400628854e-01,1.772509328255130079e+00,1.402301657971260163e+00,3.194428186749342480e+00,-4.548483482978879167e+00,5.542356401725575310e+00,6.663883921111593445e+00,4.853820641199991570e+00 +1.999619757842334256e+00,7.041442580822796504e-01,-2.990899524647011809e+00,-1.416389906301699364e-01,1.065260057237635394e+00,2.783868292014194612e+00,2.188410837817223786e+00,5.161519878274964057e+00,-7.214884734592697413e+00,3.448410263284237054e+00,6.816941834764618235e+00,6.767420930176283989e+00 +9.646047438525601203e-01,3.112914907040449375e-01,-7.596712250334135552e-01,-1.123555366811338718e+00,-1.525906179172539012e+00,-4.163455596841435513e-01,3.793433139321964020e+00,5.691750360447885626e+00,-5.329084179349631967e+00,2.839667799187656883e+00,6.447140902238344573e+00,5.071253234787926445e+00 +1.035829306169633313e+00,-2.566617040964844776e-01,6.788594751020975426e-01,-1.970825217969614851e+00,-7.148301052070493711e-01,-2.998344115800775533e+00,5.006537963053346196e+00,6.376420985584982049e+00,-2.529233866513511231e+00,1.454425914995181479e+00,7.646398408348217046e+00,3.384971377637188361e+00 +-2.080360628734745365e-02,-8.295629174762977343e-01,-2.782322861236617317e+00,-1.588768468010897106e+00,3.621706258074113283e-01,-9.711881462604883275e-01,3.767405040453775111e+00,4.703163164051236578e+00,-1.541082078909212782e-01,3.066442966190603414e+00,7.223949483958030271e+00,5.153175615421303313e+00 
+-2.176668615489226077e+00,-2.628618509971152317e-01,-4.108035577763891943e+00,1.697196899449040952e+00,-3.187822838173301143e+00,-2.520907834412113857e-01,4.623391059460301378e+00,4.388386902732331230e+00,-2.616420880557069673e+00,1.018395404878577715e+01,5.566905158542308385e+00,5.490643348955582148e+00 +-1.645649893157671961e+00,-1.107320912818145486e-01,-3.029372871602017181e+00,2.739496055782805861e+00,-4.118440239559833849e+00,1.035907301016749438e+00,4.832636701292732262e+00,3.721206062337678144e+00,-4.841557479246951345e+00,9.145626172885091165e+00,5.354959420757586130e+00,4.607999112388112550e+00 +7.534819207451137757e-03,-1.807902225159827014e-01,-1.471331302585484835e+00,-2.877547850274729813e-01,-2.540128763284053015e+00,1.359912408171636500e+00,4.065396168114132180e+00,3.073067398277013140e+00,-5.552206172695049347e+00,5.626049868871119131e-01,8.077022293538270503e+00,2.753384930472553282e+00 +1.471081049679962671e+00,-7.594261866225597846e-01,-3.180930202323710709e-01,-4.211102352125222659e+00,-1.921930323566371612e+00,-4.543019441167683858e-01,4.572059863224049003e+00,6.011934241154982317e+00,-1.318420469346107460e+00,-6.846078514769136092e-01,6.252954475747578122e+00,6.060362421801588795e+00 +1.581846486994021195e+00,-1.260619695530512008e+00,-1.641419606323502034e+00,-3.646624699638710609e+00,-2.492475649143024885e+00,-7.179682574043153576e-01,3.321180551913100132e+00,6.104450065015529781e+00,3.719103162558630160e-01,4.445272293410904574e+00,4.173886015269443917e+00,6.095660541184315662e+00 +8.681041898081029995e-01,-2.368645667974292213e+00,-2.154724203230642399e+00,6.095112830632625212e-01,-3.475142993412908332e+00,-1.507253315314915909e-01,4.119815713223559328e+00,3.784732117363432735e+00,-1.764107174036215753e-01,4.822307205518326256e+00,5.689716050834727312e+00,2.054433501081257329e+00 
+7.464971289730815140e-01,-9.377553478592643987e-01,-2.369222972776462655e+00,3.080844885536041744e+00,-2.529406287482803606e+00,1.845371391610271861e+00,3.968941913572073155e+00,2.010025383505255725e+00,-2.710883466293171118e+00,5.545202534322687171e+00,6.088343999382616722e+00,4.926708257184033712e+00 +-2.586553280014266543e-01,2.427463824807717607e+00,-2.090201914853409448e+00,2.880203708208450841e+00,-1.997387761319865174e+00,-1.450964694687728596e-01,4.754551282526016109e+00,8.089492130796367420e-01,-1.601383943903982754e+00,5.737130497143407837e+00,4.782675577584496018e+00,3.677885129263262609e+00 +-2.248670046397965194e+00,1.597540861553059521e+00,-8.412007692607401665e-01,-6.086934482895656195e-01,-7.480078423641320917e-01,1.489591720044582956e+00,6.654717642939099065e+00,3.186448218260450993e+00,4.658021322860061986e-01,4.823697145528843322e+00,3.414984659017686663e+00,5.487437335735822153e+00 +-3.348281158520842737e+00,-5.568998104568737562e-01,-6.790211283644291829e-01,-2.212927024942084930e+00,-1.704558916373672073e+00,2.041808125015635689e+00,5.846991335580540294e+00,3.337831596232551767e+00,2.098744541113709783e+00,3.277236079611692610e+00,3.458622460117518838e+00,7.928515531651342485e+00 +-1.442897591944459768e+00,6.003522344310079362e-01,-1.966815026637586783e+00,1.775244020653421462e+00,-2.148144986929363398e+00,-2.434428890479605201e+00,5.420332938498822806e+00,3.619542836945243813e+00,-2.954857026603994186e-01,-8.673261466362269800e-03,4.306900508165536046e+00,4.791877210409132282e+00 +-1.155569605286546508e+00,1.618056367957397379e+00,-2.721818127545462040e+00,2.202103774385236967e+00,-1.438884608281589372e+00,-2.209300241018751887e+00,5.135802701613228827e+00,5.306944258591969721e+00,-3.018768166747916482e+00,2.699741853299243211e+00,4.617001162087944088e+00,2.222352491173781530e+00 
+-3.488644218277279663e+00,-1.118599074445602914e+00,-3.376181105811172145e+00,9.934883595704446790e-02,-8.931507530256685978e-01,8.389966878750507728e-01,5.211812976787111751e+00,6.012687555812719964e+00,-1.814331686260398868e+00,5.796907755573260879e+00,5.797227449713707514e+00,3.538194632175998056e+00 +-4.525915842193184879e+00,-2.527108622493257961e+00,-9.006577877899053508e-01,-8.372490761748260235e-01,-6.829973961814593819e-01,-6.776843756425867671e-01,5.850405858604291964e+00,5.002963190238576274e+00,2.325373448708866864e+00,7.853608908550089751e+00,5.246662491251035121e+00,4.548401650003722096e+00 +-3.528879989993249922e+00,-4.378075977148364051e-01,-2.926237906261323074e-01,-1.590005961376358634e+00,-1.022521221361091692e+00,8.323101314925480665e-01,5.232798442089570123e+00,4.633711673012578913e+00,4.109426929386741811e+00,8.901357845062772611e+00,5.209590230851312675e+00,4.065213765574621441e+00 +-2.562622331885585414e+00,3.925845799236385436e-01,-1.507726935910972177e+00,-8.850671985609452896e-01,-1.397999139484212172e+00,-6.484968177871324180e-01,3.983304983396819043e+00,4.828507420812822915e+00,5.769515094244934694e-01,7.067279836372568624e+00,6.981173134628340549e+00,4.550353080264919470e+00 +-1.856378488901909707e+00,-9.512754879744241832e-01,-4.717032237780614423e+00,-9.504417158092401952e-01,-6.267080868992040976e-01,-1.558303820395119388e+00,3.427519950071115318e+00,5.866323724575988052e+00,-3.580441950275538687e+00,4.626256440073035492e+00,4.503333861719587716e+00,4.957952609674167732e+00 +-2.736969358397962626e+00,-1.946944243544922459e+00,-3.753120219952542413e+00,-6.556130942003135509e-03,-1.224376561236242056e+00,1.725091624855118466e-01,2.406053051552568434e+00,4.804445006333999402e+00,-2.561227109038438421e+00,3.115903428004851872e+00,3.301671559239126807e+00,5.966307568101433034e+00 
+-2.561640507428150748e+00,-5.380962687630521657e-01,-2.159943664332667268e+00,1.762954755872925938e+00,-1.427884855214213378e+00,-1.638143791690184425e+00,2.398718774257984521e+00,2.981422967539768987e+00,3.296672324359811945e+00,3.678250983605062974e+00,3.692330793902033559e+00,3.753516855710441913e+00 +-9.913390773313958126e-01,-2.876758355812703005e-01,-1.429933999842837533e+00,1.642212480845887779e+00,-3.644403502179132115e-01,1.377236017855435035e+00,1.477832309500654695e+00,4.353594444037942601e+00,5.389545340331115320e+00,4.054115218922375696e+00,6.148688400590705427e+00,3.721102504340284334e+00 +-1.056769796014828788e+00,-2.885595533548598324e-01,-2.794503445044821177e+00,1.083090011598699665e+00,-1.380432962374760208e+00,2.497806290360156112e+00,4.167258891973620649e+00,5.486485952944832434e+00,2.519490343192760662e-01,3.409630610888273239e+00,6.497457287133940440e+00,4.337604040259845029e+00 +2.274903620720447606e-02,1.178709472965589367e+00,-2.191907922540884179e+00,1.147040859111712718e+00,-1.401092978881083306e+00,-3.073230208175671940e+00,3.143035241306364469e+00,6.164143814132094334e+00,-4.478989164556972824e+00,3.865238097566201958e+00,4.555747830634098605e+00,2.919483985102220736e+00 +-7.017977424499139971e-01,5.864426458347605786e-01,-1.052410741057270505e+00,-1.888349389452816052e+00,2.640406417979335174e-02,-1.251767083818635440e+00,1.900322526990272820e+00,4.288786027707170767e+00,-3.078458539960416296e+00,4.917959417157974578e+00,4.466967039398555883e+00,5.277507025041583866e+00 +-4.761575079868316518e-01,2.033879914173333692e+00,-2.119219470474183797e+00,-1.503623604112065859e+00,1.245419952932540264e+00,8.331834933385302566e-01,3.225788328670576632e+00,2.650018181237939530e+00,2.432707009371107176e+00,3.829198120567822272e+00,6.021729223149659305e+00,6.293117544191258794e+00 
+-4.431170435912721306e-01,3.345382851260067181e+00,-3.650137045873921515e+00,-1.583532031681139873e+00,-1.326217744881489358e-01,9.462570124815141703e-01,4.987162622205645768e+00,4.128336989800555301e+00,4.831569541643757404e+00,3.211246100750840160e+00,5.343902502998334469e+00,4.456169118280132402e+00 +2.268017495856458066e-01,2.729099977591017634e+00,-4.479510528684833126e+00,-1.726598591179097086e-01,8.178885935778972271e-01,1.721393428966342398e+00,5.703450235584239358e+00,1.609590439570473208e+00,8.657061566026489352e-01,5.832052241539065207e+00,3.275055400053695553e+00,5.038320002771330408e+00 +6.307699118156784301e-02,3.615462745775235298e+00,-1.710251122774079313e+00,3.217555815846976586e+00,1.736861623118536535e+00,4.848628069717411115e-01,4.543319198418915406e+00,2.863637671601732215e+00,-2.504718404866139281e+00,8.080870330493329590e+00,3.499845759335292961e+00,6.471067310492218283e+00 From e679c41254229e814a7300900a3c5cac9bfcebd8 Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Sat, 4 Apr 2026 20:10:01 +1100 Subject: [PATCH 14/20] updates --- lectures/_static/quant-econ.bib | 31 + lectures/blackwell_kihlstrom.md | 685 +++++++-------------- lectures/merging_of_opinions.md | 154 ++--- lectures/survival_recursive_preferences.md | 6 +- 4 files changed, 350 insertions(+), 526 deletions(-) diff --git a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index 7ce311876..3a8a1e6ac 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -3869,6 +3869,17 @@ @article{csiszar1963 pages = {85--108} } +@article{morimoto1963, + author = {Morimoto, Tetsuzo}, + title = {{Markov Processes and the H-Theorem}}, + journal = {Journal of the Physical Society of Japan}, + year = 1963, + volume = {18}, + number = {3}, + pages = {328--331}, + doi = {10.1143/JPSJ.18.328} +} + @article{ali1966, author = {Ali, S. M. and Silvey, S. 
D.}, title = {{A general class of coefficients of divergence of one distribution from another}}, @@ -3879,6 +3890,16 @@ @article{ali1966 pages = {131--142} } +@article{liese2012, + author = {Liese, Friedrich}, + title = {{phi-divergences, sufficiency, Bayes sufficiency, and deficiency}}, + journal = {Kybernetika}, + year = 2012, + volume = {48}, + number = {4}, + pages = {690--713} +} + @book{chentsov1981, author = {{\v{C}}encov, Nikolai N.}, title = {{Statistical Decision Rules and Optimal Inference}}, @@ -3889,6 +3910,16 @@ @book{chentsov1981 year = 1981 } +@book{amari_nagaoka2000, + author = {Amari, Shun-ichi and Nagaoka, Hiroshi}, + title = {{Methods of Information Geometry}}, + series = {Translations of Mathematical Monographs}, + volume = {191}, + publisher = {American Mathematical Society and Oxford University Press}, + address = {Providence, RI}, + year = 2000 +} + @inproceedings{tishby_pereira_bialek1999, author = {Tishby, Naftali and Pereira, Fernando C. and Bialek, William}, title = {{The Information Bottleneck Method}}, diff --git a/lectures/blackwell_kihlstrom.md b/lectures/blackwell_kihlstrom.md index 29658374e..1a605a8ec 100644 --- a/lectures/blackwell_kihlstrom.md +++ b/lectures/blackwell_kihlstrom.md @@ -32,7 +32,7 @@ kernelspec: This lecture studies *Blackwell's theorem* {cite}`blackwell1951,blackwell1953` on ranking statistical experiments. -Our presentation brings in findings from a Bayesian interpretation of Blackwell's theorem by {cite}`kihlstrom1984`. +Our presentation brings in findings from a Bayesian interpretation of Blackwell's theorem by {cite:t}`kihlstrom1984`. Blackwell and Kihlstrom study statistical model-selection questions closely related to those encountered in this QuantEcon lecture {doc}`likelihood_bayes`. @@ -47,7 +47,7 @@ We are free to interpret the "state" as a "parameter" or "parameter vector". 
In a two-state case $S = \{s_1, s_2\}$, the two conditional densities $f(\cdot) = \mu(\cdot \mid s_1)$ and $g(\cdot) = \mu(\cdot \mid s_2)$ are the ones used repeatedly in our studies of classical hypothesis testing and Bayesian inference in this QuantEcon lecture {doc}`likelihood_bayes` as well as several other lectures in this suite of QuantEcon lectures. -{cite}`kihlstrom1984` interprets the question — *which experiment is more informative?* — as asking which conditional probability model allows a Bayesian decision maker with a prior over $\{s_1, s_2\}$ to gather higher expected utility. +{cite:t}`kihlstrom1984` interprets the question *which experiment is more informative?* as asking which conditional probability model allows a Bayesian decision maker with a prior over $\{s_1, s_2\}$ to gather higher expected utility. We'll use the terms "signal" and "experiment" as synyomyms. @@ -58,7 +58,7 @@ Signal $\mu$ is **at least as informative as** signal $\nu$ if every Bayesian de This economic criterion is equivalent to two statistical criteria: - *Sufficiency* (Blackwell): $\tilde{x}_\nu$ can be generated from $\tilde{x}_\mu$ by an additional randomization. -- *Uncertainty reduction* (DeGroot {cite}`degroot1962`): $\tilde{x}_\mu$ lowers expected uncertainty at least as much as $\tilde{x}_\nu$ for every concave uncertainty function. +- *Uncertainty reduction* ({cite:t}`degroot1962`): $\tilde{x}_\mu$ lowers expected uncertainty at least as much as $\tilde{x}_\nu$ for every concave uncertainty function. Kihlstrom's formulation focuses on the *posterior distribution*. @@ -69,7 +69,7 @@ In the two-state case, this becomes a mean-preserving-spread comparison on $[0, The lecture proceeds as follows: 1. Set up notation and define experiments as Markov matrices. -2. Define stochastic transformations (Markov kernels). +2. Define stochastic transformations using Markov kernels. 3. State the three equivalent criteria. 4. State and sketch the proof of the main theorem. 5. 
Develop the Bayesian interpretation via standard experiments and mean-preserving spreads. @@ -85,7 +85,7 @@ from scipy.optimize import minimize np.random.seed(42) ``` -## Experiments and Markov matrices +## Experiments and stochastic transformations ### The state space and experiments @@ -110,26 +110,32 @@ Each row $i$ gives the distribution of signals when the true state is $s_i$. μ = np.array([[0.6, 0.3, 0.1], [0.1, 0.3, 0.6]]) -ν = np.array([[0.5, 0.2, 0.3], - [0.2, 0.5, 0.3]]) +Q = np.array([[1.0, 0.0], + [0.5, 0.5], + [0.0, 1.0]]) -print("Experiment μ (rows sum to 1):") +ν = μ @ Q + +print("Experiment μ (3 signals, rows sum to 1):") print(μ) -print("\nExperiment ν:") +print("\nStochastic transformation Q (3 × 2):") +print(Q) +print("\nExperiment ν = μ @ Q (2 signals):") print(ν) print("\nRow sums μ:", μ.sum(axis=1)) print("Row sums ν:", ν.sum(axis=1)) ``` -### Stochastic transformations (Markov kernels) +### Stochastic transformations A **stochastic transformation** $Q$ maps signals from one experiment to signals from another by further randomization. In the discrete setting with $M$ input signals and $K$ output signals, $Q$ is an $M \times K$ Markov matrix: $q_{lk} \geq 0$ and $\sum_k q_{lk} = 1$ for every row $l$. -```{admonition} Definition (Sufficiency) -:class: tip +```{prf:definition} Sufficiency +:label: def-sufficiency + Experiment $\mu$ is *sufficient for* $\nu$ if there exists a stochastic transformation $Q$ (an $M \times K$ Markov matrix) such that @@ -143,14 +149,19 @@ $\tilde{x}_\nu$ by passing their signal through $Q$. If you observe the more informative signal $\tilde{x}_\mu$, then you can always *throw away* information to reproduce a less informative signal. -The reverse is not possible: a less informative signal cannot be enriched to recover what was lost. +The reverse is not possible: a less informative signal cannot be enriched to +recover what was lost. 
-```{code-cell} ipython3 -def is_markov(M, tol=1e-10): - """Check whether a matrix is a valid Markov (row-stochastic) matrix.""" - return np.all(M >= -tol) and np.allclose(M.sum(axis=1), 1.0) +We can verify this numerically using the two experiments $\mu$ and $\nu$ +defined above. + +The function below searches for a stochastic transformation $Q$ that +minimizes $\|\nu - \mu \, Q\|$. +If an exact $Q$ exists the residual will be close to zero; otherwise it will +be large. +```{code-cell} ipython3 def find_stochastic_transform(μ, ν, tol=1e-8): """ Find a row-stochastic matrix Q that minimizes ||ν - μ @ Q||. @@ -184,27 +195,42 @@ def find_stochastic_transform(μ, ν, tol=1e-8): Q = unpack(result.x) residual = np.linalg.norm(ν - μ @ Q) - return Q, residual, result.success + return Q, residual + +# Forward: find Q such that ν = μ @ Q (should succeed) +Q_fwd, res_fwd = find_stochastic_transform(μ, ν) +print("Forward (μ to ν):") +print(f" residual = {res_fwd:.2e}") +print(f" exact transformation exists: {res_fwd < 1e-6}") + +# Reverse: find Q' such that μ = ν @ Q' (should fail) +Q_rev, res_rev = find_stochastic_transform(ν, μ) +print("\nReverse (ν to μ):") +print(f" residual = {res_rev:.2e}") +print(f" exact transformation exists: {res_rev < 1e-6}") +``` -Q_true = np.array([[1.0, 0.0], - [0.0, 1.0], - [0.0, 1.0]]) +The forward residual is close to zero: a stochastic transformation from +$\mu$ to $\nu$ exists, confirming that $\mu$ is sufficient for $\nu$. -ν_garbled = μ @ Q_true -print("ν = μ @ Q_true:") -print(ν_garbled) -print("ν is Markov:", is_markov(ν_garbled)) +The reverse residual is large: no stochastic transformation can recover +$\mu$ from $\nu$. -Q_found, res, success = find_stochastic_transform(μ, ν_garbled) -print(f"\nRecovered Q (success = {success}, residual = {res:.2e}):") -print(np.round(Q_found, 4)) -print("Rows of Q sum to:", Q_found.sum(axis=1).round(4)) -``` +No stochastic transformation can undo the +information loss. 
+ +The key is that the inverse of a stochastic transformation in general is not a stochastic transformation. + +In fact, the only stochastic transformations whose inverses are also stochastic are permutation matrices, which merely relabel signals without losing any information. ## Three equivalent criteria +Blackwell's theorem establishes that three different ways of comparing experiments all turn out to be equivalent. + ### Criterion 1: the economic criterion +The first criterion compares experiments by their value to decision makers. + Let $A$ be a compact convex set of actions and $u: A \times S \to \mathbb{R}$ a bounded utility function. @@ -226,8 +252,9 @@ B(\mu, A, u) = \Bigl\{v \in \mathbb{R}^N : \text{ for some measurable } f: X \to A \Bigr\}. $$ -```{admonition} Definition (Economic criterion) -:class: tip +```{prf:definition} Economic criterion +:label: def-economic-criterion + $\mu$ is **at least as informative as** $\nu$ in the economic sense if $$ @@ -249,8 +276,11 @@ Equivalently, every Bayesian decision maker attains weakly higher expected utili ### Criterion 2: the sufficiency criterion -```{admonition} Definition (Blackwell sufficiency) -:class: tip +The second criterion uses the stochastic transformation idea introduced above. + +```{prf:definition} Blackwell sufficiency +:label: def-blackwell-sufficiency + $\mu \geq \nu$ in Blackwell's sense if there exists a stochastic transformation $Q$ from the signal space of $\mu$ to the signal space of $\nu$ such that $$ @@ -263,6 +293,8 @@ In matrix notation for finite experiments: $\nu = \mu \, Q$. ### Criterion 3: the uncertainty criterion +The third criterion compares experiments by how much they reduce uncertainty about the state. + {cite:t}`degroot1962` calls any concave function $U: P \to \mathbb{R}$ an **uncertainty function**. The prototypical example is Shannon entropy: @@ -271,8 +303,9 @@ $$ U(p) = -\sum_{i=1}^{N} p_i \log p_i. 
$$ -```{admonition} Definition (DeGroot uncertainty criterion) -:class: tip +```{prf:definition} DeGroot uncertainty criterion +:label: def-degroot-uncertainty + $\mu$ **reduces expected uncertainty at least as much as** $\nu$ if, for every prior $p \in P$ and every concave $U: P \to \mathbb{R}$, $$ @@ -310,8 +343,9 @@ Kihlstrom's standard-experiment construction will later let us compare posterior ## The main theorem -```{admonition} Theorem (Blackwell 1953; see also Blackwell 1951, Bonnenblust et al. 1949, and DeGroot 1962) -:class: important +```{prf:theorem} Blackwell's theorem +:label: thm-blackwell + The following three conditions are equivalent: (i) Economic criterion: $B(\mu, A, u) \supseteq B(\nu, A, u)$ for every compact convex $A$ and every bounded utility function $u$. @@ -321,15 +355,17 @@ The following three conditions are equivalent: (iii) Uncertainty criterion: $\int_P U(q)\,\hat\mu^p(dq) \leq \int_P U(q)\,\hat\nu^p(dq)$ for every prior $p \in P$ and every concave $U$. ``` +See also {cite:t}`blackwell1951`, {cite:t}`bonnenblust1949`, and {cite:t}`degroot1962`. + The hard part is the equivalence between the economic and sufficiency criteria. *Sketch (ii $\Rightarrow$ i):* If $\nu = \mu Q$, then any decision rule based on $\tilde{x}_\nu$ can be replicated by first observing $\tilde{x}_\mu$, then drawing a synthetic $\tilde{x}_\nu$ from $Q$, and then applying the same rule. -*Sketch (i $\Rightarrow$ ii):* Since $B(\mu, A, u) \supseteq B(\nu, A, u)$ for every $A$ and $u$, a separating-hyperplane (duality) argument implies the existence of a mean-preserving stochastic transformation $D$ mapping posteriors of $\nu$ to posteriors of $\mu$, which constructs the required $Q$. +*Sketch (i $\Rightarrow$ ii):* Since $B(\mu, A, u) \supseteq B(\nu, A, u)$ for every $A$ and $u$, a separating-hyperplane (duality) argument implies the existence of a posterior-space mean-preserving kernel $D$ sending the standard experiment of $\nu$ into that of $\mu$. 
Passing from these posterior laws back to the original signal spaces then yields the required garbling $Q$ with $\nu = \mu Q$. Thus $D$ is an intermediate randomization on posterior beliefs, not literally the signal-space kernel $Q$. *Sketch (ii $\Rightarrow$ iii):* Under a garbling, the posterior from the coarser experiment is the conditional expectation of the posterior from the finer experiment, so Jensen's inequality gives the result for every concave $U$. -*Sketch (iii $\Rightarrow$ ii):* The converse — that the inequality for all concave $U$ forces the existence of $Q$ — is proved in {cite}`blackwell1953`, and Kihlstrom's posterior-based representation makes the geometry transparent. +*Sketch (iii $\Rightarrow$ ii):* The converse, that the inequality for all concave $U$ forces the existence of $Q$, is proved in {cite}`blackwell1953`. Kihlstrom's posterior-based representation makes the geometry transparent. ## Kihlstrom's Bayesian interpretation @@ -346,8 +382,9 @@ $$ The posterior $p^\mu(x) \in P$ is a random point in the simplex. -```{admonition} Key property (mean preservation) -:class: note +```{prf:property} Mean preservation +:label: prop-mean-preservation + The prior $p$ is the expectation of the posterior: $$ @@ -357,16 +394,42 @@ $$ This is sometimes called the *law of iterated expectations for beliefs*. ``` -For a fixed prior $c$, Kihlstrom's **standard experiment** ${}^c\mu^*$ records only the posterior generated by $\mu$. +For a fixed prior $c$, Kihlstrom's **standard experiment** replaces the raw signals of $\mu$ with the posterior beliefs they generate. -Its distribution $\hat\mu^c$ on $P$ satisfies $\int_P q \, \hat\mu^c(dq) = c$. +Let $\hat\mu^c$ denote the distribution over posteriors induced by $\mu$ under prior $c$. +Mean preservation implies $\int_P q \, \hat\mu^c(dq) = c$. Two experiments are **informationally equivalent** when they induce the same posterior distribution. 
-The standard experiment strips away every detail of the signal except its posterior, so it is a *minimal sufficient statistic* for the comparison of experiments. +The standard experiment strips away every detail of the signal except its posterior, so it provides a canonical Bayesian representation for comparing experiments. + +A stochastic kernel on posterior beliefs lives on the simplex $P$, whereas a Blackwell garbling $Q$ lives on the original signal space. Kihlstrom's construction uses the former to study convex order and then recovers the latter after passing to standard experiments. Any two experiments that generate the same distribution over posteriors lead to identical decisions for every Bayesian decision maker, regardless of how different their raw signal spaces may look. +### Mean-preserving spreads and Blackwell's order + +Kihlstrom's key reformulation is the following. + +```{prf:theorem} Kihlstrom's Reformulation +:label: thm-kihlstrom + +$\mu \geq \nu$ in Blackwell's sense if and only if $\hat\mu^c$ is a +**mean-preserving spread** of $\hat\nu^c$; that is, + +$$ +\int_P g(p)\,\hat\mu^c(dp) \;\geq\; \int_P g(p)\,\hat\nu^c(dp) +$$ + +for every convex function $g: P \to \mathbb{R}$. +``` + +Equivalently, $\hat\mu^c$ is larger than $\hat\nu^c$ in convex order. + +A better experiment spreads posterior beliefs farther from the prior while preserving their mean. + +To see this concretely, we define two experiments for the two-state case and compute their posteriors. 
+ ```{code-cell} ipython3 def compute_posteriors(μ, prior, tol=1e-14): """ @@ -403,14 +466,14 @@ prior = np.array([0.5, 0.5]) post_μ, probs_μ = compute_posteriors(μ_info, prior) post_ν, probs_ν = compute_posteriors(ν_info, prior) -print("=== Experiment μ (more informative) ===") +print("Experiment μ (more informative):\n") print("Signal probabilities:", probs_μ.round(3)) print("Posteriors (row = signal, col = state):") print(post_μ.round(3)) mean_μ, ok_μ = check_mean_preservation(post_μ, probs_μ, prior) print(f"E[posterior] = {mean_μ.round(4)} (equals prior: {ok_μ})") -print("\n=== Experiment ν (less informative) ===") +print("\n Experiment ν (less informative):\n") print("Signal probabilities:", probs_ν.round(3)) print("Posteriors:") print(post_ν.round(3)) @@ -418,10 +481,10 @@ mean_ν, ok_ν = check_mean_preservation(post_ν, probs_ν, prior) print(f"E[posterior] = {mean_ν.round(4)} (equals prior: {ok_ν})") ``` -### Visualizing posterior distributions on the simplex - For $N = 2$ states, the simplex $P$ is the unit interval $[0, 1]$ (the probability -of state $s_1$). We can directly plot the distribution of posteriors under +of state $s_1$). + +We can directly plot the distribution of posteriors under experiments $\mu$ and $\nu$. ```{code-cell} ipython3 @@ -465,27 +528,17 @@ def plot_posterior_distributions(μ_matrix, ν_matrix, prior, plot_posterior_distributions(μ_info, ν_info, prior) ``` -The more informative experiment $\mu$ pushes posteriors farther from the prior in both directions. +This is the mean-preserving spread in action: both distributions have the same mean (equal to the prior), but the more informative experiment $\mu$ spreads its posteriors farther apart. -### Mean-preserving spreads and Blackwell's order +We can verify the mean-preserving spread condition numerically. -Kihlstrom's key reformulation is the following. 
+The key fact is that, up to an affine term, any convex function can be represented as a mixture of +"call option" payoffs $g_t(p) = \max(p - t, 0)$. -```{admonition} Theorem (Kihlstrom's Reformulation) -:class: important -$\mu \geq \nu$ in Blackwell's sense if and only if $\hat\mu^c$ is a -**mean-preserving spread** of $\hat\nu^c$; that is, +Because the two posterior distributions being compared have the same mean, that affine term cancels in the comparison. -$$ -\int_P g(p)\,\hat\mu^c(dp) \;\geq\; \int_P g(p)\,\hat\nu^c(dp) -$$ - -for every convex function $g: P \to \mathbb{R}$. -``` - -Equivalently, $\hat\mu^c$ is larger than $\hat\nu^c$ in convex order. - -A better experiment spreads posterior beliefs farther from the prior while preserving their mean. +So it suffices to check $E[g_t(p^\mu)] \geq E[g_t(p^\nu)]$ for all +thresholds $t \in [0, 1]$. ```{code-cell} ipython3 --- @@ -532,6 +585,8 @@ def check_mps_convex_functions(μ_matrix, ν_matrix, prior, n_functions=200): _ = check_mps_convex_functions(μ_info, ν_info, prior) ``` +The difference $E[g_t(p^\mu)] - E[g_t(p^\nu)]$ is non-negative for every threshold $t$, confirming that $\hat\mu^c$ is a mean-preserving spread of $\hat\nu^c$ and therefore $\mu \geq \nu$ in the Blackwell order. + ## Simulating the Blackwell order with many states We now move to a three-state example. @@ -563,13 +618,13 @@ print(np.round(ν3, 3)) For three states, posterior beliefs live in a 2-simplex. 
-Let's visualize the posterior clouds under $\mu$ and $\nu$ +Let's visualize sampled posterior points under $\mu$ and $\nu$ ```{code-cell} ipython3 --- mystnb: figure: - caption: Posterior clouds on the 2-simplex + caption: Sampled posterior points on the 2-simplex name: fig-blackwell-simplex-clouds --- def sample_posteriors(μ_matrix, prior, n_draws=3000): @@ -634,9 +689,11 @@ def plot_simplex_posteriors(μ_matrix, ν_matrix, prior3, n_draws=3000): plot_simplex_posteriors(μ3, ν3, prior3) ``` -Under $\mu$, the posterior cloud reaches farther toward the vertices. +Because this example has only three signals, each panel consists of three posterior atoms sampled repeatedly rather than a continuous cloud. + +Under $\mu$, the sampled posterior points reach farther toward the vertices. -Under the garbled experiment $\nu$, the cloud stays closer to the center. +Under the garbled experiment $\nu$, the sampled posterior points stay closer to the center. ## The DeGroot uncertainty function @@ -720,6 +777,11 @@ def tsallis_entropy(p, q=2): return (1 - np.sum(p**q)) / (q - 1) +def tsallis_q15(p): + """Tsallis entropy with q=1.5 for an independent concavity check.""" + return tsallis_entropy(p, q=1.5) + + def sqrt_index(p): """Concave uncertainty index based on sum(sqrt(p_i)).""" p = np.clip(np.asarray(p), 0.0, 1.0) @@ -728,11 +790,14 @@ def sqrt_index(p): uncertainty_functions = { "Shannon entropy": entropy, "Gini impurity": gini_impurity, - "Tsallis (q=2)": tsallis_entropy, + "Tsallis (q=1.5)": tsallis_q15, "Square-root index": sqrt_index, } -print(f"{'Uncertainty function':<22} {'I(μ)':<10} {'I(ν)':<10} {'I(μ)>=I(ν)?'}") +header = (f"{'Uncertainty function':<22} " + f"{'I(μ)':<10} {'I(ν)':<10} " + f"{'I(μ)>=I(ν)?'}") +print(header) print("-" * 58) for name, U in uncertainty_functions.items(): I_μ = degroot_value(μ_info, prior, U) @@ -790,7 +855,13 @@ Every concave uncertainty function assigns weakly higher value to a more informa ## Connection to second-order stochastic 
dominance -The uncertainty-function representation makes the connection to **second-order stochastic dominance (SOSD)** explicit. +A random variable $X$ **second-order stochastically dominates** +$Y$ (written $X \succeq_{\text{SOSD}} Y$) if +$E[u(X)] \geq E[u(Y)]$ for every concave function $u$. +Equivalently, $Y$ is a mean-preserving spread of $X$. + +The uncertainty-function representation makes the connection +to SOSD explicit. Because $U$ is concave, $-U$ is convex, and the condition @@ -798,13 +869,11 @@ $$ \mathbb{E}[U(p^\mu)] \leq \mathbb{E}[U(p^\nu)] \quad \text{for all concave } U $$ -is precisely the statement that $\hat\mu^c$ dominates $\hat\nu^c$ in the **mean-preserving spread** sense on $P$. - -The Blackwell ordering on *experiments* is therefore isomorphic to the SOSD ordering on *distributions of posteriors*. +is precisely the statement that $\hat\mu^c$ dominates $\hat\nu^c$ in convex order on $P$. When $N = 2$, posterior beliefs are scalars in $[0, 1]$, and the SOSD comparison reduces to the classical integrated-CDF test. -Specifically, $\hat\mu^c$ is a mean-preserving spread of $\hat\nu^c$ if and only if $\int_0^t F_\nu(s)\,ds \geq \int_0^t F_\mu(s)\,ds$ for all $t \in [0,1]$, where $F_\mu$ and $F_\nu$ are the CDFs of the posterior on $s_1$ under each experiment. +Specifically, $\hat\mu^c$ is a mean-preserving spread of $\hat\nu^c$ if and only if $\int_0^t F_\mu(s)\,ds \geq \int_0^t F_\nu(s)\,ds$ for all $t \in [0,1]$, where $F_\mu$ and $F_\nu$ are the CDFs of the posterior on $s_1$ under each experiment. Equivalently, in SOSD language, the less informative posterior under $\nu$ dominates the more dispersed posterior under $\mu$. 
We can verify this graphically for the two-state example above @@ -863,11 +932,13 @@ def plot_sosd_posteriors(μ_matrix, ν_matrix, prior): int_ν = integrated_cdf(sv_ν, cm_ν, grid) ax2.plot(grid, int_μ, label=r"$\int F_\mu$", color="steelblue", linewidth=2) - ax2.plot(grid, int_ν, label=r"$\int F_\nu$", color="darkorange", linewidth=2) - ax2.fill_between(grid, int_μ, int_ν, - where=int_ν >= int_μ, - alpha=0.2, color="darkorange", - label=r"$\int F_\nu \geq \int F_\mu$ ($\mu$ MPS-dominates $\nu$)") + ax2.plot(grid, int_ν, color="darkorange", + label=r"$\int F_\nu$", linewidth=2) + ax2.fill_between(grid, int_ν, int_μ, + where=int_μ >= int_ν, + alpha=0.2, color="steelblue", + label=(r"$\int F_\mu \geq \int F_\nu$" + r" ($\mu$ is an MPS of $\nu$)")) ax2.set_xlabel(r"$t$", fontsize=12) ax2.set_ylabel("integrated CDF", fontsize=12) ax2.text(0.03, 0.94, "integrated CDFs", transform=ax2.transAxes, va="top") @@ -879,91 +950,6 @@ def plot_sosd_posteriors(μ_matrix, ν_matrix, prior): plot_sosd_posteriors(μ_info, ν_info, prior) ``` -## Mean-preserving randomization - -Kihlstrom proves that (i) $\Rightarrow$ (ii) by explicit construction. - -Given that $\mu$ achieves at least the value of $\nu$ for every decision maker, he constructs a stochastic transformation $D(p^0, \cdot)$ on $P$ that is **mean-preserving**: - -$$ -\int_P q \, D(p^0, dq) = p^0. -$$ - -Setting $Q = D$ provides the Markov kernel witnessing Blackwell sufficiency. - -The mean-preservation condition says: passing $\tilde{x}_\mu$ through $Q$ to produce a synthetic $\tilde{x}_\nu$ cannot add information — it only destroys it. 
- -```{code-cell} ipython3 -def verify_garbling_mean_preservation(μ_matrix, Q_matrix, prior): - """Verify that a garbling preserves the prior as the mean posterior.""" - ν_matrix = μ_matrix @ Q_matrix - posts_μ, probs_μ = compute_posteriors(μ_matrix, prior) - posts_ν, probs_ν = compute_posteriors(ν_matrix, prior) - - mean_μ = (posts_μ * probs_μ[:, None]).sum(axis=0) - mean_ν = (posts_ν * probs_ν[:, None]).sum(axis=0) - - print(f"Prior: {prior.round(4)}") - print(f"E[p^μ]: {mean_μ.round(4)}") - print(f"E[p^ν = p^(μQ)]: {mean_ν.round(4)}") - print(f"Both equal prior? μ: {np.allclose(mean_μ, prior)}, " - f"ν: {np.allclose(mean_ν, prior)}") - - -Q_soft = np.array([[0.7, 0.3], - [0.3, 0.7]]) - -verify_garbling_mean_preservation(μ_info, Q_soft, prior) -``` - -## Comparing experiments systematically - -We now study a grid of experiments indexed by their quality parameter $\theta$. - -We will compare: - -1. The spread of posterior beliefs. -2. The value of information under concave uncertainty functions. -3. The integrated-CDF ranking in the two-state case. 
- -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: Posterior distributions for increasing experiment quality - name: fig-blackwell-quality-grid ---- -θ_grid = [0.1, 0.4, 0.7, 1.0] -prior2 = np.array([0.5, 0.5]) - -fig, axes = plt.subplots(2, 2, figsize=(11, 8)) -axes = axes.flat - -for ax, θ in zip(axes, θ_grid): - μ_θ = make_experiment(θ) - posts, probs = compute_posteriors(μ_θ, prior2) - p_s1 = posts[:, 0] - ax.vlines(p_s1, 0, probs, linewidth=8, color="steelblue", alpha=0.7) - ax.axvline(prior2[0], color="tomato", linestyle="--", linewidth=2, - label=f"prior = {prior2[0]:.2f}") - I_H = degroot_value(μ_θ, prior2, entropy) - I_G = degroot_value(μ_θ, prior2, gini_impurity) - ax.set_xlim(0, 1) - ax.set_xlabel(r"posterior $p(s_1 \mid x)$", fontsize=11) - ax.set_ylabel("probability mass", fontsize=11) - ax.text(0.03, 0.94, - f"θ = {θ}\n" f"I_H = {I_H:.3f}\n" f"I_G = {I_G:.3f}", - transform=ax.transAxes, va="top") - ax.legend(fontsize=10) - -plt.tight_layout() -plt.show() -``` - -As $\theta$ rises from 0 to 1, posterior beliefs move toward the vertices $\{0, 1\}$. - -At the same time, the value of information rises under every concave uncertainty function. - ## Application 1: product quality information {cite:t}`kihlstrom1974a` applies Blackwell's theorem to consumer demand for information about product quality. @@ -974,70 +960,77 @@ At the same time, the value of information rises under every concave uncertainty The Blackwell order says that, absent costs, more information is always better for every expected-utility maximizer. -Optimal information demand equates the *marginal value of the standard experiment* to its *marginal cost*. +With costs, the consumer chooses quality investment $\theta$ to maximize *net value*. + +If quality investment translates into experiment accuracy with diminishing returns — say, accuracy $\phi(\theta) = 1 - e^{-a\theta}$ for a rate parameter $a$ — then the marginal value of information eventually decreases in $\theta$. 
-In the example below, we assume a linear cost $c \cdot \lambda$ and a simple family of experiments $\mu(\theta)$ as above with $c = 0.4$ +With a convex cost $c(\theta) = c \, \theta^2$, the increasing marginal cost eventually overtakes the declining marginal value, producing an interior optimum. ```{code-cell} ipython3 --- mystnb: figure: - caption: Information demand in a simple quality example + caption: Information demand with a quadratic cost name: fig-blackwell-information-demand --- -def consumer_value(θ, prior2, U=entropy, cost_per_unit=0.5): - """Value of purchasing experiment quality θ.""" - μ_t = make_experiment(θ) - gross = degroot_value(μ_t, prior2, U) - net = gross - cost_per_unit * θ - return gross, net +def gross_value(θ, prior2, U=entropy, rate=2): + """Gross value of quality investment θ (diminishing returns).""" + accuracy = 1 - np.exp(-rate * θ) + μ_t = (1 - accuracy) * np.ones((2, 2)) / 2 + accuracy * np.eye(2) + return degroot_value(μ_t, prior2, U) θ_fine = np.linspace(0, 1, 200) -gross_vals = [] -net_vals = [] -marginal_vals = [] - -for θ in θ_fine: - g, n = consumer_value(θ, prior2, entropy, cost_per_unit=0.4) - gross_vals.append(g) - net_vals.append(n) +c = 0.6 +gross_vals = np.array([gross_value(θ, prior2) for θ in θ_fine]) +cost_vals = c * θ_fine**2 +net_vals = gross_vals - cost_vals marginal_vals = np.gradient(gross_vals, θ_fine) +marginal_cost = 2 * c * θ_fine +opt_idx = int(np.argmax(net_vals)) fig, axes = plt.subplots(1, 2, figsize=(12, 4)) ax = axes[0] -ax.plot(θ_fine, gross_vals, label="Gross value I(θ)", +ax.plot(θ_fine, gross_vals, + label="Gross value I(θ)", color="steelblue", linewidth=2) -ax.plot(θ_fine, [0.4 * t for t in θ_fine], - label="Cost c · θ", color="tomato", - linestyle="--", linewidth=2) -ax.plot(θ_fine, net_vals, label="Net value", color="green", linewidth=2) -ax.set_xlabel("experiment quality θ", fontsize=11) -ax.set_ylabel("value (Shannon entropy units)", fontsize=11) +ax.plot(θ_fine, cost_vals, + label=r"Cost 
$c\theta^2$", + color="tomato", linestyle="--", linewidth=2) +ax.plot(θ_fine, net_vals, + label="Net value", color="green", linewidth=2) +ax.axvline(θ_fine[opt_idx], color="green", + linestyle=":", linewidth=2, + label=f"θ* ≈ {θ_fine[opt_idx]:.2f}") +ax.set_xlabel("quality investment θ", fontsize=11) +ax.set_ylabel("value (entropy units)", fontsize=11) ax.legend(fontsize=10) ax2 = axes[1] -ax2.plot(θ_fine, marginal_vals, label="Marginal value I'(θ)", +ax2.plot(θ_fine, marginal_vals, + label="Marginal value I'(θ)", color="steelblue", linewidth=2) -ax2.axhline(0.4, color="tomato", linestyle="--", linewidth=2, - label="Marginal cost $c = 0.4$") -opt_idx = np.argmin(np.abs(np.array(marginal_vals) - 0.4)) -ax2.axvline(θ_fine[opt_idx], color="green", linestyle=":", - linewidth=2, - label=f"Optimal θ* ≈ {θ_fine[opt_idx]:.2f}") -ax2.set_xlabel("experiment quality θ", fontsize=11) -ax2.set_ylabel("marginal value / marginal cost", fontsize=11) +ax2.plot(θ_fine, marginal_cost, + label=r"Marginal cost $2c\theta$", + color="tomato", linestyle="--", linewidth=2) +ax2.axvline(θ_fine[opt_idx], color="green", + linestyle=":", linewidth=2, + label=f"θ* ≈ {θ_fine[opt_idx]:.2f}") +ax2.set_xlabel("quality investment θ", fontsize=11) +ax2.set_ylabel("marginal value / cost", fontsize=11) ax2.legend(fontsize=10) plt.tight_layout() plt.show() ``` -The optimal demand for information $\theta^*$ occurs where marginal value equals marginal cost. +The optimal investment $\theta^*$ occurs where marginal value equals marginal cost. -Comparative statics follow from shifts in either curve. +Because experiment accuracy has diminishing returns in $\theta$, the marginal value of investment eventually falls below the rising marginal cost, yielding a genuine interior optimum. + +Raising $c$ shifts the marginal cost curve up and reduces $\theta^*$, while a more asymmetric prior shifts the marginal value curve and changes the optimum. 
## Application 2: sequential experimental design @@ -1170,16 +1163,16 @@ Kihlstrom's Bayesian exposition places the *posterior distribution* at the cente A more informative experiment generates a more dispersed posterior distribution with the same mean prior. -The right probabilistic language is convex order, and the Blackwell ordering on experiments is isomorphic to the second-order stochastic dominance (SOSD) ordering on distributions of posteriors. +The right probabilistic language is convex order on the simplex of posterior beliefs. -In the two-state case this reduces to the familiar mean-preserving-spread comparison on $[0, 1]$, which can be verified with the integrated-CDF test. +In the two-state case this reduces to the familiar SOSD / integrated-CDF test on $[0, 1]$. DeGroot's contribution is to extend the comparison from particular utility functions to the full class of concave uncertainty functions. ## The Data Processing Inequality and Coarse-Graining -Blackwell's garbling condition — that $\nu = \mu Q$ for some Markov kernel $Q$ — is the same mathematical operation that underlies the **data processing inequality** (DPI) and the **coarse-graining theorem** in information theory, information geometry, and machine learning. +Blackwell's condition that $\nu = \mu Q$ for some Markov kernel $Q$ is the same mathematical operation that underlies the **data processing inequality** (DPI) and the **coarse-graining theorem** in information theory, information geometry, and machine learning. ### The DPI for f-divergences @@ -1200,19 +1193,21 @@ Special cases include: | Total variation TV | $\lvert t - 1 \rvert / 2$ | | Chi-squared $\chi^2$ | $(t-1)^2$ | -The class of f-divergences was introduced independently by {cite}`csiszar1963` and Morimoto (1963); see also {cite}`ali1966`. +The class of f-divergences was introduced independently by {cite:t}`ali1966`, {cite:t}`csiszar1963`, and {cite:t}`morimoto1963`; see also {cite:t}`liese2012`. 
+ +```{prf:theorem} Data Processing Inequality +:label: thm-data-processing -```{admonition} Coarse-Graining Theorem / Data Processing Inequality -:class: important For any f-divergence $D_f$ and any Markov kernel (stochastic transformation) -$\kappa$ — with $P \kappa$ denoting the image of $P$ under $\kappa$ — we have +$\kappa$, with $P \kappa$ denoting the image of $P$ under $\kappa$, we have $$ D_f(P \| Q) \geq D_f(P\kappa \| Q\kappa). $$ -Equality holds if and only if $\kappa$ is induced by a sufficient statistic for -the pair $\{P, Q\}$. +If $\kappa$ is induced by a sufficient statistic for the pair $\{P, Q\}$, then equality holds. + +A converse of this form requires additional hypotheses; a clean binary-model characterization is given below. ``` The proof follows from Jensen's inequality applied to the convex function $f$, using the fact that $\kappa$ is a stochastic matrix {cite}`csiszar1963`. @@ -1238,7 +1233,9 @@ So a more informative experiment always produces *more separated* conditional si The DPI is thus a statement about the *distinguishability* of states: garbling an experiment makes the states harder to tell apart under every statistical measure of separability. -The equality condition links the DPI directly back to Blackwell: $D_f(\mu_1 Q \| \mu_2 Q) = D_f(\mu_1 \| \mu_2)$ for some (hence every) strictly convex $f$ if and only if $Q$ is a sufficient statistic for $(\mu_1, \mu_2)$, i.e., the garbling discards nothing relevant. +For binary experiments, the equality condition links the DPI directly back to Blackwell: $D_f(\mu_1 Q \| \mu_2 Q) = D_f(\mu_1 \| \mu_2)$ for some strictly convex $f$ if and only if $Q$ is a sufficient statistic for $(\mu_1, \mu_2)$. + +Once sufficiency holds, equality follows for every convex $f$ {cite}`liese2012`. ### Information geometry: Chentsov's theorem @@ -1255,13 +1252,13 @@ Equality holds if and only if $\kappa$ is a sufficient statistic for $\theta$. 
The uniqueness clause is deep: it says that the Fisher information is not merely *one* metric that happens to contract under coarse-graining, but the *only one* with that property. -See Amari and Nagaoka (2000) for a thorough treatment of information geometry and its connections to sufficiency. +See {cite:t}`amari_nagaoka2000` for a thorough treatment of information geometry and its connections to sufficiency. ### The information bottleneck in machine learning -The **information bottleneck** method of {cite}`tishby_pereira_bialek1999` provides a prominent application of the DPI in machine learning. +The **information bottleneck** method of {cite:t}`tishby_pereira_bialek1999` provides a prominent application of the DPI in machine learning. -Given a joint distribution $p(X, Y)$ over an input $X$ and a target $Y$, the goal is to find a compressed representation $T$ — formed by a stochastic mapping $p(T \mid X)$ — that retains as much information about $Y$ as possible while using as few bits as possible to describe $X$. +Given a joint distribution $p(X, Y)$ over an input $X$ and a target $Y$, the goal is to find a compressed representation $T$, formed by a stochastic mapping $p(T \mid X)$, that retains as much information about $Y$ as possible while using as few bits as possible to describe $X$. The method minimizes the Lagrangian @@ -1284,6 +1281,7 @@ The Blackwell ordering explains why no deterministic or random post-processing o In machine learning language the information bottleneck searches among all garblings of $X$ for the one that best preserves relevant information about $Y$ subject to a compression budget. In a deep neural network with input $X$ and target $Y$ and layers $X \to T_1 \to T_2 \to \cdots \to T_L \to \hat{Y}$, each layer's representation is a garbling of the previous one. 
+ The DPI then implies the chain of inequalities $$ @@ -1291,141 +1289,12 @@ I(X;\, Y) \geq I(T_1;\, Y) \geq I(T_2;\, Y) \geq \cdots \geq I(T_L;\, Y), $$ so successive layers can only lose, never gain, information about $Y$. -This observation was placed at the center of the study of what deep networks learn by {cite}`shwartz_ziv_tishby2017`. - -### Demonstrating the coarse-graining theorem numerically - -The following code verifies that applying a progressively more mixing garbling $Q(\alpha)$ — interpolating between the identity matrix ($\alpha = 0$, no garbling) and the fully-mixing uniform kernel ($\alpha = 1$, complete garbling) — decreases *all* f-divergences between the experiment's rows simultaneously. - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: All f-divergences contract monotonically under progressive garbling - name: fig-blackwell-dpi-fdivergences ---- -def kl_divergence_rows(p, q, eps=1e-12): - """D_KL(p || q) for row vectors p, q.""" - p = np.clip(np.asarray(p, float), eps, 1.0) - q = np.clip(np.asarray(q, float), eps, 1.0) - return float(np.sum(p * np.log(p / q))) - - -def squared_hellinger_rows(p, q, eps=1e-12): - """H^2(p, q) = (1/2) * sum (sqrt(p_i) - sqrt(q_i))^2.""" - p = np.clip(np.asarray(p, float), eps, 1.0) - q = np.clip(np.asarray(q, float), eps, 1.0) - return float(0.5 * np.sum((np.sqrt(p) - np.sqrt(q)) ** 2)) - - -def total_variation_rows(p, q): - """TV(p, q) = (1/2) * sum |p_i - q_i|.""" - return float(0.5 * np.sum(np.abs(np.asarray(p, float) - - np.asarray(q, float)))) - - -def make_mixing_garbling(alpha, M=2): - """ - Garbling that interpolates between the identity (alpha=0) - and the fully-mixing uniform kernel (alpha=1). 
- """ - return (1.0 - alpha) * np.eye(M) + alpha * np.ones((M, M)) / M - - -# Rows of the binary experiment: distribution of signal given each state -row1 = np.array([0.8, 0.2]) # state s_1 -row2 = np.array([0.2, 0.8]) # state s_2 - -alpha_grid = np.linspace(0, 1, 200) -dpi_results = { - r"KL divergence $D_\mathrm{KL}(\nu_1 \| \nu_2)$": [], - r"Squared Hellinger $H^2(\nu_1, \nu_2)$": [], - r"Total variation $\mathrm{TV}(\nu_1, \nu_2)$": [], -} - -for alpha in alpha_grid: - Q = make_mixing_garbling(alpha) - ν1 = row1 @ Q - ν2 = row2 @ Q - dpi_results[r"KL divergence $D_\mathrm{KL}(\nu_1 \| \nu_2)$"].append( - kl_divergence_rows(ν1, ν2)) - dpi_results[r"Squared Hellinger $H^2(\nu_1, \nu_2)$"].append( - squared_hellinger_rows(ν1, ν2)) - dpi_results[r"Total variation $\mathrm{TV}(\nu_1, \nu_2)$"].append( - total_variation_rows(ν1, ν2)) - -fig, ax = plt.subplots(figsize=(9, 4)) -colors_dpi = ["steelblue", "darkorange", "green"] -for (name, vals), c in zip(dpi_results.items(), colors_dpi): - arr = np.array(vals) - ax.plot(alpha_grid, arr / arr[0], label=name, color=c, linewidth=2) - -ax.set_xlabel(r"garbling intensity $\alpha$ (0 = identity, 1 = fully mixed)", - fontsize=11) -ax.set_ylabel("divergence normalised by its value at $\\alpha = 0$", fontsize=11) -ax.legend(fontsize=10) -ax.set_ylim(-0.05, 1.1) -plt.tight_layout() -plt.show() - -print("Divergences at α = 0 (no garbling):") -for name, vals in dpi_results.items(): - print(f" {name.ljust(50)}: {vals[0]:.4f}") -print("\nDivergences at α = 1 (complete garbling):") -for name, vals in dpi_results.items(): - print(f" {name.ljust(50)}: {vals[-1]:.2e}") -``` - -All three f-divergences decrease monotonically to zero as the experiment is progressively garbled toward complete mixing. - -This confirms the coarse-graining theorem: a single Blackwell garbling simultaneously contracts every f-divergence between the conditional distributions of signals given states. 
- -The following code makes the connection between the Blackwell ordering and the DPI explicit. - -It computes multiple f-divergences for experiments of increasing quality $\theta$ (the same parameterization used earlier) and verifies that Blackwell-higher experiments have strictly larger f-divergences. - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: More informative experiments have larger f-divergences between rows - name: fig-blackwell-dpi-quality ---- -θ_vals = np.linspace(0, 1, 100) -dpi_quality = { - r"KL divergence $D_\mathrm{KL}$": [], - r"Squared Hellinger $H^2$": [], - r"Total variation TV": [], -} - -for θ in θ_vals: - μ_θ = make_experiment(θ, N=2) - r1, r2 = μ_θ[0], μ_θ[1] - dpi_quality[r"KL divergence $D_\mathrm{KL}$"].append( - kl_divergence_rows(r1, r2)) - dpi_quality[r"Squared Hellinger $H^2$"].append( - squared_hellinger_rows(r1, r2)) - dpi_quality[r"Total variation TV"].append( - total_variation_rows(r1, r2)) - -fig, ax = plt.subplots(figsize=(9, 4)) -for (name, vals), c in zip(dpi_quality.items(), colors_dpi): - arr = np.array(vals) - ax.plot(θ_vals, arr / (arr[-1] + 1e-15), label=name, color=c, linewidth=2) - -ax.set_xlabel(r"experiment quality $\theta$ (0 = uninformative, 1 = perfect)", - fontsize=11) -ax.set_ylabel("divergence normalised by value at $\\theta = 1$", fontsize=11) -ax.legend(fontsize=10) -plt.tight_layout() -plt.show() -``` -Every f-divergence between the rows $(\mu_1, \mu_2)$ is strictly increasing in experiment quality $\theta$. +This observation was placed at the center of the study of what deep networks learn by {cite}`shwartz_ziv_tishby2017`. -At $\theta = 0$ the rows are both $[0.5, 0.5]$, so every divergence is zero. +{numref}`fig-blackwell-value-by-quality` already illustrates this: as experiment quality $\theta$ increases, every measure of informativeness rises monotonically. 
-At $\theta = 1$ the rows are $[1, 0]$ and $[0, 1]$, so the KL-divergence is infinite and the Hellinger distance and total variation reach their maximum values. +The DPI says the same thing in reverse: garbling (decreasing $\theta$) can only contract these measures. ### Summary of the DPI–Blackwell correspondence @@ -1435,7 +1304,7 @@ The table below collects the precise correspondence between Blackwell's framewor |:---|:---| | Garbling $\nu = \mu Q$ | Applying Markov kernel $\kappa$ to a pair $(P, Q) = (\mu_1, \mu_2)$ | | $\mu \geq \nu$ in Blackwell order | $D_f(\mu_1 \| \mu_2) \geq D_f(\nu_1 \| \nu_2)$ for every f-divergence | -| Sufficiency ($Q$ discards nothing) | Equality in DPI for every strictly convex $f$ | +| Sufficiency ($Q$ discards nothing) | Equality in DPI; in binary models, one strictly convex $f$ already characterizes sufficiency | | DeGroot value $I(\mu; U_H)$ | Mutual information $I(\tilde{x}_\mu;\, \tilde{s})$ (Shannon DPI) | | Posterior spreads under $\mu$ vs $\nu$ | $D_f$ between rows larger under $\mu$ | | Blackwell theorem (economic $\Leftrightarrow$ garbling) | DPI for all $f$ $\Leftrightarrow$ single Markov kernel witnesses dominance | @@ -1445,124 +1314,36 @@ The table below collects the precise correspondence between Blackwell's framewor ## Relation to Bayesian likelihood-ratio learning -The lecture {doc}`likelihood_bayes` studies Bayesian learning in a setting that is a special, dynamic instance of everything developed here. - -This section transports concepts back and forth between the two lectures. - -### Setup: states, experiments, and IID draws - -In {doc}`likelihood_bayes` the unknown "state of the world" is which density nature chose permanently: nature drew the data either from $f$ or from $g$, but not which one is known to the observer. - -This is a two-element finite state space - -$$ -S = \{s_1, s_2\} \qquad \text{with } s_1 \leftrightarrow f,\quad s_2 \leftrightarrow g. 
-$$ - -The Bayesian prior $\pi_0 \in [0,1]$ on $s_1 = f$ plays exactly the role of the prior $p \in P$ on the probability simplex in the present lecture. - -A single draw is an experiment. - -A single observation $w_t$ constitutes a Blackwell experiment with signal space $X$ and Markov kernel +The lecture {doc}`likelihood_bayes` is a dynamic two-state special case of the framework developed here. -$$ -\mu = \begin{pmatrix} f(\cdot) \\ g(\cdot) \end{pmatrix}, -$$ +Let $S = \{s_1, s_2\}$ with $s_1 \leftrightarrow f$ and $s_2 \leftrightarrow g$, where $f$ and $g$ are the two candidate data-generating densities. -where row $i$ is the conditional density of the signal given state $s_i$: -$\mu(\cdot \mid s_1) = f(\cdot)$ and $\mu(\cdot \mid s_2) = g(\cdot)$. +Then a single observation is a Blackwell experiment with rows $f(\cdot)$ and $g(\cdot)$, and the history $w^t = (w_1, \ldots, w_t)$ defines a richer experiment $\mu_t$. -This is the continuous-signal analogue of the $N \times M$ Markov matrix studied above (with $N = 2$ states and a continuum of signals instead of $M$ discrete ones). +Because one can always discard the last $t-s$ observations, $\mu_t$ Blackwell-dominates $\mu_s$ for every $t > s$. -$t$ IID draws form a richer experiment. - -Observing the history $w^t = (w_1, \ldots, w_t)$ is a strictly more informative Blackwell experiment than observing any sub-history $w^s$ for $s < t$, because the conditional joint densities for $w^t$ are +The likelihood-ratio process $$ -\mu_t(\cdot \mid s_1) = f(w_1) f(w_2) \cdots f(w_t), -\qquad -\mu_t(\cdot \mid s_2) = g(w_1) g(w_2) \cdots g(w_t). +L(w^t) = \prod_{i=1}^t \frac{f(w_i)}{g(w_i)} $$ -The experiment $\mu_t$ Blackwell-dominates $\mu_s$ for any $t > s$: you can always garble $w^t$ down to $w^s$ by discarding the last $t - s$ draws, which is an explicit stochastic transformation $Q$ satisfying $\mu_s = \mu_t Q$. - -The reverse is impossible — you cannot reconstruct information from fewer draws. 
- -This is why more data is always weakly better for every expected-utility maximiser (the economic criterion of Blackwell's theorem). - -### Sufficient statistics and posteriors - -The key formula in {doc}`likelihood_bayes` is +is a sufficient statistic for $\mu_t$, and the posterior $$ -\pi_{t+1} = \frac{\pi_0 \, L(w^{t+1})}{\pi_0 \, L(w^{t+1}) + 1 - \pi_0}, -\qquad -L(w^t) = \prod_{i=1}^t \frac{f(w_i)}{g(w_i)}. +\pi_t = \Pr(s_1 \mid w^t) += \frac{\pi_0 L(w^t)}{\pi_0 L(w^t) + 1 - \pi_0} $$ -Because $\pi_{t+1}$ depends on $w^t$ **only through** $L(w^t)$, the likelihood ratio process is a **sufficient statistic** for the experiment $\mu_t$. - -In Blackwell's language, the experiment "report $L(w^t)$" is informationally equivalent to "report $w^t$": passing $w^t$ through the deterministic map $w^t \mapsto L(w^t)$ is a (degenerate) stochastic transformation that discards nothing relevant to discriminating $f$ from $g$. - -The posterior lives on the 1-simplex and is Kihlstrom's standard experiment. - -With $N = 2$ states the probability simplex $P$ collapses to the unit interval $[0,1]$. - -Kihlstrom's standard experiment records only the posterior - -$$ -\pi_t = \Pr(s = f \mid w^t), -$$ - -which is the sufficient statistic that the Bayesian tracks throughout. - -The *distribution* of $\pi_t$ over all possible histories $w^t$ is Kihlstrom's $\hat{\mu}^c$ — the distribution of posteriors induced by the experiment $\mu_t$ starting from prior $\pi_0 = c$. - -### Why more data always helps - -{doc}`likelihood_bayes` proves that $\{\pi_t\}$ is a **martingale**: - -$$ -E[\pi_t \mid \pi_{t-1}] = \pi_{t-1}, -$$ - -and in particular $E[\pi_t] = \pi_0$ for all $t$. - -This is exactly the **mean-preservation** condition that sits at the centre of Kihlstrom's reformulation: the distribution of posteriors $\hat{\mu}^c$ must satisfy $\int_P p \, \hat{\mu}^c(dp) = c$. 
- -Mean preservation is not a special feature of this two-state example; it is an exact consequence of Bayes' law for *any* experiment. - -Blackwell's theorem explains why more data always helps. - -Kihlstrom's reformulation states: - -> $\mu_t \geq \mu_s$ in Blackwell's sense if and only if $\hat{\mu}_t^c$ is a **mean-preserving spread** of $\hat{\mu}_s^c$, i.e., posteriors under $\mu_t$ are more dispersed than under $\mu_s$. - -In the {doc}`likelihood_bayes` setting this means the distribution of $\pi_t$ is a mean-preserving spread of the distribution of $\pi_s$ for $t > s$: more data pushes posteriors further from the prior toward either $0$ or $1$. - -The almost-sure convergence $\pi_t \to 0$ or $1$ is the limit of this spreading process — perfect information resolves all uncertainty, collapsing the distribution to a degenerate point mass at a vertex of the simplex. - -DeGroot uncertainty functions connect to mutual information. - -The Shannon entropy of the two-state posterior is - -$$ -U_H(\pi) = -\pi \log \pi - (1-\pi)\log(1-\pi). -$$ - -DeGroot's value of information for the experiment that generates $t$ draws is - -$$ -I(\mu_t;\, U_H) = U_H(\pi_0) - E[U_H(\pi_t)], -$$ +is Kihlstrom's standard experiment in this two-state setting. -which equals the **mutual information** between the history $w^t$ and the unknown state. +Its martingale property, $E[\pi_t] = \pi_0$, is exactly the mean-preservation result proved above for posterior distributions. -Because $\mu_t$ Blackwell-dominates $\mu_s$ for $t > s$, Blackwell's theorem guarantees $I(\mu_t; U) \geq I(\mu_s; U)$ for *every* concave uncertainty function $U$ — more draws reduce expected uncertainty under every such measure, not just Shannon entropy. 
+Likewise, $\mu_t \geq \mu_s$ implies that the distribution of $\pi_t$ is a mean-preserving spread of the distribution of $\pi_s$, so additional data pushes beliefs farther toward $0$ and $1$ while lowering expected uncertainty under every concave uncertainty function. ### Summary table -The following table collects the translation between concepts in the two lectures. +The table below records the dictionary between the two lectures without repeating the earlier arguments. | Concept in {doc}`likelihood_bayes` | Concept in this lecture | |---|---| @@ -1570,9 +1351,9 @@ The following table collects the translation between concepts in the two lecture | Densities $f(\cdot)$, $g(\cdot)$ | Rows of experiment matrix $\mu$ | | Single draw $w_t$ | Blackwell experiment with continuous signal space | | History $w^t$ of $t$ IID draws | Richer experiment $\mu_t$ Blackwell-dominating $\mu_s$, $s < t$ | -| Likelihood ratio $L(w^t)$ | Sufficient statistic / standard experiment | +| Likelihood ratio $L(w^t)$ | Sufficient statistic for $\mu_t$ | | Prior $\pi_0$ | Prior $p \in P$ on the 1-simplex $[0,1]$ | -| Posterior $\pi_t$ | Posterior random variable on $P = [0,1]$ | +| Posterior $\pi_t$ | Posterior on $P = [0,1]$ (Kihlstrom's standard experiment) | | Distribution of $\pi_t$ across histories | $\hat{\mu}^c$ (Kihlstrom's posterior distribution) | | Martingale property $E[\pi_t] = \pi_0$ | Mean preservation of $\hat{\mu}^c$ | | $\pi_t \to 0$ or $1$ almost surely | Posteriors spread to vertices (MPS in the limit) | diff --git a/lectures/merging_of_opinions.md b/lectures/merging_of_opinions.md index e6905934c..9ac7165ba 100644 --- a/lectures/merging_of_opinions.md +++ b/lectures/merging_of_opinions.md @@ -36,9 +36,9 @@ The theorem asks a simple question: The answer is yes under an absolute-continuity condition. -If $P \ll Q$, then the conditional distributions under $P$ and $Q$ over the entire future path merge in total variation, $Q$-almost surely. 
+If $Q \ll P$ (that is, $P$ dominates $Q$), then the conditional distributions under $P$ and $Q$ over the entire future path merge in total variation, $Q$-almost surely.
 
-If in addition $Q \ll P$, then the same conclusion holds under both agents' probabilities.
+If in addition $P \ll Q$ (so that $P \sim Q$), then the same conclusion holds under both agents' probabilities.
 
This result connects to several other ideas:
 
@@ -66,7 +66,9 @@ from scipy.special import betaln
 
### The sequence space and its filtration
 
-Let $(S, \mathscr{S})$ be a measurable space, called the signal space.
+Let $(S, \mathscr{S})$ be a standard Borel space (i.e., a measurable space isomorphic to a Borel subset of a complete separable metric space), called the signal space.
+
+The standard Borel assumption guarantees the existence of regular conditional distributions, which the theorem requires.
 
Set $\Omega = S^{\mathbb{N}}$, the set of all infinite sequences
$\omega = (x_1, x_2, \ldots)$ with $x_n \in S$, equipped with the product
@@ -80,6 +82,8 @@ $$
 
so $\mathscr{F}_1 \subseteq \mathscr{F}_2 \subseteq \cdots \subseteq \mathscr{F}$.
 
+Define $\mathscr{F}_\infty = \sigma\!\left(\bigcup_{n \geq 1} \mathscr{F}_n\right)$, the **limit $\sigma$-algebra** generated by the whole filtration (not to be confused with the tail $\sigma$-algebra $\bigcap_{m \geq 1} \sigma(x_{m+1}, x_{m+2}, \ldots)$); it encodes everything that can eventually be learned.
+
The collection $\{\mathscr{F}_n\}_{n \geq 1}$ is the **natural filtration**
generated by the observation process; $\mathscr{F}_n$ encodes everything that
can be learned from the first $n$ data points.
@@ -194,43 +198,43 @@ Our main tool is the Radon–Nikodym derivative process.
 
### The likelihood ratio
 
-Since $P \ll Q$ implies $P_n \ll Q_n$ for every $n$, the Radon–Nikodym
+Since $Q \ll P$ implies $Q_n \ll P_n$ for every $n$, the Radon–Nikodym
theorem guarantees the existence of the likelihood ratio
 
$$
-Z_n = \frac{dP_n}{dQ_n}, \qquad Z_n \geq 0 \;\; Q\text{-a.s.},
-\qquad \mathbb{E}_Q[Z_n] = 1.
+Z_n = \frac{dQ_n}{dP_n}, \qquad Z_n \geq 0 \;\; P\text{-a.s.},
+\qquad \mathbb{E}_P[Z_n] = 1.
$$ -The key structural property is that global absolute continuity $P \ll Q$ -implies the existence of an overall Radon–Nikodym derivative $Z = dP/dQ$ +The key structural property is that global absolute continuity $Q \ll P$ +implies the existence of an overall Radon–Nikodym derivative $Z = dQ/dP$ on all of $(\Omega, \mathscr{F})$, and $$ -Z_n = \mathbb{E}_Q[Z \,|\, \mathscr{F}_n] \qquad Q\text{-a.s.} +Z_n = \mathbb{E}_P[Z \,|\, \mathscr{F}_n] \qquad P\text{-a.s.} $$ That is, $\{Z_n, \mathscr{F}_n\}_{n \geq 1}$ is a non-negative, uniformly -integrable $Q$-martingale. +integrable $P$-martingale. ```{prf:lemma} Martingale Convergence :label: martingale_convergence The likelihood-ratio process $\{Z_n\}$ satisfies: -1. $Z_n \to Z_\infty$ $Q$-almost surely as $n \to \infty$. -2. $Z_\infty = \mathbb{E}_Q[Z \,|\, \mathscr{F}_\infty]$ $Q$-a.s. -3. $Z_n \to Z_\infty$ in $L^1(Q)$: $\;\mathbb{E}_Q[|Z_n - Z_\infty|] \to 0$. +1. $Z_n \to Z_\infty$ $P$-almost surely as $n \to \infty$. +2. $Z_\infty = \mathbb{E}_P[Z \,|\, \mathscr{F}_\infty]$ $P$-a.s. +3. $Z_n \to Z_\infty$ in $L^1(P)$: $\;\mathbb{E}_P[|Z_n - Z_\infty|] \to 0$. *Proof sketch.* Non-negativity and the martingale property give boundedness -in $L^1(Q)$. +in $L^1(P)$. Then almost-sure convergence follows from Doob's martingale convergence theorem {cite:t}`doob1953`. Uniform integrability (which follows -from $Z \in L^1(Q)$ via the conditional Jensen inequality) upgrades this to -$L^1(Q)$ convergence. $\square$ +from $Z \in L^1(P)$ via the conditional Jensen inequality) upgrades this to +$L^1(P)$ convergence. $\square$ ``` ### Connecting conditional measures to the likelihood ratio @@ -238,30 +242,30 @@ $L^1(Q)$ convergence. $\square$ The following identity connects the likelihood ratio to the conditional distributions. 
On the set $\{Z_n > 0\}$, the Radon–Nikodym derivative of -$P(\,\cdot\,|\,\mathscr{F}_n)$ with respect to $Q(\,\cdot\,|\,\mathscr{F}_n)$ +$Q(\,\cdot\,|\,\mathscr{F}_n)$ with respect to $P(\,\cdot\,|\,\mathscr{F}_n)$ is $$ -\frac{d\,P(\,\cdot\,|\,\mathscr{F}_n)}{d\,Q(\,\cdot\,|\,\mathscr{F}_n)} +\frac{d\,Q(\,\cdot\,|\,\mathscr{F}_n)}{d\,P(\,\cdot\,|\,\mathscr{F}_n)} = \frac{Z_\infty}{Z_n} -\qquad Q\text{-a.s. on } \{Z_n > 0\}. +\qquad P\text{-a.s. on } \{Z_n > 0\}. $$ Applying the total-variation formula with $f = Z_\infty / Z_n$ then gives $$ d_n -= \mathbb{E}_{Q(\cdot|\mathscr{F}_n)}\!\left[\left(\frac{Z_\infty}{Z_n} - 1\right)^{\!+}\right] -= 1 - \mathbb{E}_{Q(\cdot|\mathscr{F}_n)}\!\left[\min\!\left(\frac{Z_\infty}{Z_n},\,1\right)\right]. += \mathbb{E}_{P(\cdot|\mathscr{F}_n)}\!\left[\left(\frac{Z_\infty}{Z_n} - 1\right)^{\!+}\right] += 1 - \mathbb{E}_{P(\cdot|\mathscr{F}_n)}\!\left[\min\!\left(\frac{Z_\infty}{Z_n},\,1\right)\right]. $$ -Multiplying through by $Z_n$ and integrating with respect to $Q$: +Multiplying through by $Z_n$ and taking the $P$-expectation (then using $\mathbb{E}_P[Z_n \, g(\mathscr{F}_n)] = \mathbb{E}_Q[g(\mathscr{F}_n)]$ for $\mathscr{F}_n$-measurable $g$): $$ -2\,\mathbb{E}_Q[d_n] \;\leq\; \mathbb{E}_Q[|Z_\infty - Z_n|], +2\,\mathbb{E}_Q[d_n] \;=\; \mathbb{E}_P[|Z_\infty - Z_n|], $$ -So the $L^1$ convergence of the martingale controls how fast the total variation distance goes to zero. +So the $L^1(P)$ convergence of the martingale controls how fast the total variation distance goes to zero. ## The Blackwell–Dubins theorem @@ -270,7 +274,7 @@ So the $L^1$ convergence of the martingale controls how fast the total variation :label: blackwell_dubins Let $P$ and $Q$ be probability measures on $(\Omega, \mathscr{F})$ with -$P \ll Q$. +$Q \ll P$. Define @@ -278,18 +282,18 @@ $$ d_n = \bigl\|P(\,\cdot\,|\,\mathscr{F}_n) - Q(\,\cdot\,|\,\mathscr{F}_n)\bigr\|_{\mathrm{TV}}. $$ -Then $d_n \to 0$ almost surely under $Q$ (and hence also under $P$). 
+Then $d_n \to 0$ $Q$-almost surely.
```
 
The proof has three steps.
 
Step 1. Representation of $d_n$ via $Z_n$.
 
-As shown above, $d_n$ can be written in terms of $Z_\infty / Z_n$.
+As shown above, $d_n$ can be written in terms of $Z_\infty / Z_n$, where $Z_n = \mathbb{E}_P[Z \,|\, \mathscr{F}_n]$ and $Z = dQ/dP$.
 
-This reduces the problem to a statement about one martingale under $Q$.
+This reduces the problem to a statement about one martingale under $P$.
 
-Step 2. $\{d_n\}$ is a $Q$-supermartingale.
+Step 2. $\{d_n\}$ is a non-negative $Q$-supermartingale.
 
Conditioning on more information reduces distinguishability on average.
 
@@ -310,25 +314,27 @@ Step 3. The almost-sure limit is zero.
 
From Step 1 and the $L^1$ bound:
 
$$
-\mathbb{E}_Q[d_n] \leq \tfrac{1}{2}\,\mathbb{E}_Q[|Z_\infty - Z_n|] \to 0.
+\mathbb{E}_Q[d_n] = \tfrac{1}{2}\,\mathbb{E}_P[|Z_\infty - Z_n|] \to 0.
$$
 
-Hence $d_n \to 0$ in $L^1(Q)$ and therefore in probability.
+The right-hand side vanishes by $L^1(P)$ convergence of the martingale.
 
-Since $d_n$ already converges almost surely, its limit must satisfy $d_\infty = 0$ $Q$-a.s.
+Hence $d_n \to 0$ in $L^1(Q)$ and therefore in $Q$-probability.
 
-Because $P \ll Q$, the same conclusion also holds $P$-almost surely. $\square$
+Since $d_n$ already converges $Q$-almost surely, its limit must satisfy $d_\infty = 0$ $Q$-a.s. $\square$
 
```{prf:remark} One-Sided vs. Mutual Absolute Continuity
:label: one_sided_vs_mutual
 
-The theorem requires only $P \ll Q$, not $Q \ll P$.
+The theorem requires only $Q \ll P$, not $P \ll Q$.
 
-Since $P \ll Q$ means every $Q$-null set is also $P$-null, the conclusion $d_n \to 0$ $Q$-a.s. automatically implies $d_n \to 0$ $P$-a.s.
+One-sided absolute continuity $Q \ll P$ gives merging $Q$-almost surely.
 
-One-sided absolute continuity is therefore enough for merging under both agents' measures.
+Since $Q \ll P$ means every $P$-null set is also $Q$-null, $Q$-a.s. convergence does *not* automatically imply $P$-a.s. convergence.
-Mutual absolute continuity $P \sim Q$ adds symmetry: the proof can also be run with $P$ as the reference measure and $Q$ as the alternative, but the conclusion is the same. +To conclude that $d_n \to 0$ under *both* agents' measures, one needs mutual absolute continuity $P \sim Q$. + +With $P \ll Q$ added, the proof can be run with the roles of $P$ and $Q$ swapped, yielding $d_n \to 0$ $P$-a.s. as well. ``` ```{prf:remark} Sharpness @@ -558,7 +564,7 @@ mystnb: figure: caption: | Merging in the Beta–Bernoulli example. - The four panels show posterior predictive means, the total-variation distance $d_n$, the likelihood-ratio martingale, and posterior densities at selected horizons. + The four panels show posterior predictive means, the total-variation distance $d_n$, the log likelihood ratio $\log Z_n$, and posterior densities at selected horizons. name: fig-merging-of-opinions-beta-bernoulli --- p_true = 0.65 @@ -621,10 +627,12 @@ from matplotlib.lines import Line2D handles = [ Line2D([0], [0], color='black', lw=2, label='agent 1'), Line2D([0], [0], color='black', lw=2, ls='--', label='agent 2'), - Line2D([0], [0], color=colors[0], lw=2, label='$n=0$'), - Line2D([0], [0], color=colors[-1], lw=2, label=f'$n={n_steps}$'), - Line2D([0], [0], color='black', lw=1.0, ls=':', label=f'$p^*={p_true}$') ] +for epoch, col in zip(epochs, colors): + handles.append(Line2D([0], [0], color=col, lw=2, label=f'$n={epoch}$')) +handles.append( + Line2D([0], [0], color='black', lw=1.0, ls=':', label=f'$p^*={p_true}$') +) ax.legend(handles=handles, fontsize=8) ax.set_ylim(bottom=0) @@ -658,7 +666,7 @@ mystnb: figure: caption: | Almost-sure merging across many sample paths. - The left panel plots the total-variation distance and the right panel plots the likelihood-ratio martingale. + The left panel plots the total-variation distance and the right panel plots the log likelihood ratio $\log Z_n$. 
name: fig-merging-of-opinions-many-paths --- N_paths = 80 @@ -766,7 +774,7 @@ This is only an illustration, not a proof, because it uses unconditional average ## Failure of merging: mutual singularity -What happens when the hypothesis $P \ll Q$ fails? +What happens when the hypothesis $Q \ll P$ fails? The singular case is the cleanest counterexample. @@ -850,7 +858,7 @@ More data does not reconcile the agents, because each rules out paths the other ## Kakutani's theorem: when does merging hold? A natural question is: for which product measures does the Blackwell–Dubins -hypothesis $P \ll Q$ hold? +hypothesis $Q \ll P$ hold? For infinite product measures, the answer is given by a classical result of {cite:t}`kakutani1948`. @@ -865,10 +873,10 @@ dominating measure $\lambda$, the **Hellinger affinity** is $$ \rho_n = \int_S \sqrt{\frac{dP_n}{d\lambda} \cdot \frac{dQ_n}{d\lambda}}\,d\lambda -\;\in\; (0, 1]. +\;\in\; [0, 1]. $$ -$\rho_n = 1$ if and only if $P_n = Q_n$. +$\rho_n = 1$ if and only if $P_n = Q_n$; $\rho_n = 0$ if and only if $P_n \perp Q_n$. ``` For two specific one-dimensional families: @@ -891,7 +899,7 @@ $$ :label: kakutani_dichotomy Let $P = \bigotimes_{n=1}^\infty P_n$ and $Q = \bigotimes_{n=1}^\infty Q_n$ -be infinite product measures. +be infinite product measures whose factors are pairwise equivalent: $P_n \sim Q_n$ for every $n$. Then either $P \sim Q$ or $P \perp Q$; there is no intermediate case. @@ -1082,15 +1090,13 @@ A key subtlety on $[0,+\infty)$ is that local absolute continuity does *not* imp Suppose $Z_t$ is a true $Q$-martingale for every finite horizon and let $Z_t \to Z_\infty$ $Q$-a.s. -If $\{Z_t\}$ is uniformly integrable on $[0,\infty)$, then $P \ll Q$ on $\mathscr{F}_\infty$ and +If $\{Z_t\}$ is uniformly integrable on $[0,\infty)$, then $P \ll Q$ on $\mathscr{F}_\infty$ with $dP/dQ = Z_\infty$. -$$ -\frac{dP}{dQ} = Z_\infty. -$$ +For the Blackwell–Dubins conclusion we need $Q \ll P$ on $\mathscr{F}_\infty$ (the reverse direction). 
-If uniform integrability fails, then global absolute continuity on $\mathscr{F}_\infty$ can fail. +In many standard settings, including deterministic drifts satisfying the energy condition below, the measures are in fact *equivalent* ($P \sim Q$) on $\mathscr{F}_\infty$, so both directions hold. -In that case one can no longer conclude merging from the discrete-time argument alone. +If uniform integrability fails, then global absolute continuity on $\mathscr{F}_\infty$ can fail. In many standard examples, including a non-zero constant drift, the measures are in fact singular on $\mathscr{F}_\infty$. ``` @@ -1103,9 +1109,11 @@ $$ Informally, this says the total amount of information separating the two measures over the infinite horizon is finite. +Under the energy condition, $P \sim Q$ on $\mathscr{F}_\infty$, so Blackwell–Dubins applies and merging holds under both measures. + When $\theta$ is a non-zero constant, the condition fails, the measures are singular on $\mathscr{F}_\infty$, and merging does not occur. -Whenever $P \ll Q$ on $\mathscr{F}_\infty$ is established, the proof of the +Whenever $Q \ll P$ on $\mathscr{F}_\infty$ is established, the proof of the continuous-time Blackwell–Dubins result is identical to the discrete-time proof. @@ -1113,7 +1121,7 @@ $\{d_t, \mathscr{F}_t\}$ is a non-negative $Q$-supermartingale in $[0,1]$, so $d_t \to d_\infty$ $Q$-a.s. The $L^1$ bound -$\mathbb{E}_Q[d_t] \leq \tfrac{1}{2}\mathbb{E}_Q[|Z_t - Z_\infty|] \to 0$ +$\mathbb{E}_Q[d_t] = \tfrac{1}{2}\mathbb{E}_P[|Z_t - Z_\infty|] \to 0$ forces $d_\infty = 0$. @@ -1127,18 +1135,22 @@ Suppose data $(x_1, x_2, \ldots)$ are drawn from the true measure $Q^*$. An agent holds a prior $\pi$ over a family $\{Q_\theta : \theta \in \Theta\}$, inducing a marginal $P = \int Q_\theta\,\pi(d\theta)$. 
-If $P \ll Q^*$, then Blackwell–Dubins gives +If $Q^* \ll P$ (i.e., the agent's marginal model dominates the truth), then Blackwell–Dubins gives $$ \bigl\|P(\,\cdot\,|\,x_1,\ldots,x_n) - Q^*(\,\cdot\,|\,x_1,\ldots,x_n)\bigr\|_{\mathrm{TV}} \to 0 \quad Q^*\text{-a.s.} $$ -This is a strong form of Bayesian consistency. +This is a strong form of Bayesian consistency: the agent's predictions merge with the truth under the true measure. + +A prior assigning positive mass to a neighbourhood of the true parameter typically guarantees *local* absolute continuity $Q^*_n \ll P_n$ for every finite horizon $n$, but not the global condition $Q^* \ll P$ on $\mathscr{F}_\infty$ that Blackwell–Dubins requires. + +For example, in the Beta–Bernoulli model with a non-atomic prior $\pi$, the mixture $P = \int \mathrm{Bernoulli}(p)^{\infty}\,\pi(dp)$ satisfies $Q^*_n \ll P_n$ for every $n$, yet $Q^* \not\ll P$ globally because the set $\{\lim k_n/n = p^*\}$ has $Q^*$-measure one but $P$-measure zero (different Bernoulli product measures are mutually singular). -In many dominated parametric models, absolute continuity follows from the prior assigning positive mass to a suitable neighbourhood of the true parameter. +Global absolute continuity does hold under additional structure, for instance when the parameter space is finite or the model is sufficiently regular to admit a Doob-consistency argument. -{cite:t}`diaconis1986` show that this absolute-continuity condition is not just sufficient but essentially *necessary* for Doob consistency. +{cite:t}`diaconis1986` study the consistency of Bayes estimates and show, among other results, that the interplay between local and global absolute continuity plays a central role in ensuring posterior convergence. When $P \perp Q^*$, there are events of probability one under $Q^*$ that have probability zero under $P$, so the agent's beliefs remain fundamentally misspecified. @@ -1184,10 +1196,10 @@ It tells us that $d_n \to 0$, but not how fast. 
The bound $$ -\mathbb{E}_Q[d_n] \leq \tfrac{1}{2}\,\mathbb{E}_Q[|Z_n - Z_\infty|] +\mathbb{E}_Q[d_n] = \tfrac{1}{2}\,\mathbb{E}_P[|Z_n - Z_\infty|] $$ -shows that the rate of merging is controlled by the $L^1$ convergence rate of the likelihood-ratio martingale. +shows that the rate of merging is controlled by the $L^1(P)$ convergence rate of the likelihood-ratio martingale. In regular parametric examples, one often sees $n^{-1/2}$-type behavior. @@ -1249,12 +1261,12 @@ That is consistent with $n^{-1/2}$ scaling in this simulation. The logical flow underlying the Blackwell–Dubins theorem is: $$ -P \ll Q +Q \ll P \;\Longrightarrow\; -Z = \frac{dP}{dQ} \in L^1(Q) +Z = \frac{dQ}{dP} \in L^1(P) \;\Longrightarrow\; -Z_n = \mathbb{E}_Q[Z \,|\, \mathscr{F}_n] -\xrightarrow{L^1(Q)} +Z_n = \mathbb{E}_P[Z \,|\, \mathscr{F}_n] +\xrightarrow{L^1(P)} Z_\infty \;\Longrightarrow\; d_n \xrightarrow{Q\text{-a.s.}} 0. @@ -1262,15 +1274,15 @@ $$ Takeaways: -1. One-sided absolute continuity $P \ll Q$ suffices for merging under both measures. +1. One-sided absolute continuity $Q \ll P$ gives merging $Q$-almost surely. For merging under *both* measures, one needs mutual absolute continuity $P \sim Q$. -2. The likelihood-ratio martingale $Z_n$ and its $L^1$ convergence drive the result. +2. The likelihood-ratio martingale $Z_n = \mathbb{E}_P[Z|\mathscr{F}_n]$ and its $L^1(P)$ convergence drive the result. 3. More data can only reduce (in expectation) the difficulty of distinguishing two hypotheses. -4. Kakutani's theorem characterises when $P \ll Q$ holds for product measures: precisely when $\sum_n (1 - \rho_n) < \infty$. +4. For infinite product measures, Kakutani's theorem gives a sharp equivalence-versus-singularity dichotomy: either $P \sim Q$ (when $\sum_n (1 - \rho_n) < \infty$) or $P \perp Q$ (when the sum diverges), with no intermediate case. -5. For product measures, either $P \sim Q$ and merging occurs, or $P \perp Q$ and disagreement persists forever. +5. 
When $P \sim Q$, Blackwell–Dubins applies and merging occurs under both measures; when $P \perp Q$, disagreement persists forever. ### Applications in economics @@ -1292,6 +1304,6 @@ Some influential applications and extensions are: ### A companion result from probability -{cite}`DiaconisFreedman1986` establish consistency of Bayes estimates under misspecification, a result in the same intellectual tradition as Blackwell--Dubins. +{cite}`DiaconisFreedman1986` study the consistency of Bayes estimates, proving equivalences involving posterior convergence and providing counterexamples that highlight the role of the prior. -It is routinely co-cited with the merging theorem in the economics learning literature as providing the probabilistic underpinning for Bayesian consistency. +Their work is in the same intellectual tradition as Blackwell–Dubins and is routinely co-cited with the merging theorem in the economics learning literature. diff --git a/lectures/survival_recursive_preferences.md b/lectures/survival_recursive_preferences.md index 51b00a749..0548d3323 100644 --- a/lectures/survival_recursive_preferences.md +++ b/lectures/survival_recursive_preferences.md @@ -118,7 +118,7 @@ $$ F(C, \nu) = \beta \frac{C^{1-\gamma}}{1-\gamma} \cdot -\frac{(1-\gamma) - (1-\rho)\nu / \beta}{\rho - \gamma} +\left(\frac{(1-\gamma) - (1-\rho)\nu / \beta}{\rho - \gamma}\right)^{(\gamma - \rho)/(1-\rho)} $$ (eq:felicity) where $\nu$ is the endogenous discount rate. 
@@ -1014,8 +1014,8 @@ When agent 2 is the large agent, Proposition 5.1 in {cite:t}`Borovicka2020` impl $$ \lim_{\upsilon \searrow 0} r(\upsilon) -= \beta + \rho \mu_Y + \omega^2 \sigma_Y -+ \frac{1}{2} (1 - \gamma) \sigma_Y^2 += \beta + \rho \left(\mu_Y + \omega^2 \sigma_Y ++ \frac{1}{2} (1 - \gamma) \sigma_Y^2\right) - \frac{1}{2} \gamma \sigma_Y^2 $$ (eq:riskfree) From 9fd0773de2417d2a942cc3e1e3f8a31292d6d760 Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Sat, 4 Apr 2026 20:16:30 +1100 Subject: [PATCH 15/20] updates --- lectures/enet_binomial.csv | 200 ------------------------- lectures/enet_poisson.csv | 200 ------------------------- lectures/lasso_data.csv | 300 ------------------------------------- lectures/y_arma_data.csv | 251 ------------------------------- 4 files changed, 951 deletions(-) delete mode 100644 lectures/enet_binomial.csv delete mode 100644 lectures/enet_poisson.csv delete mode 100644 lectures/lasso_data.csv delete mode 100644 lectures/y_arma_data.csv diff --git a/lectures/enet_binomial.csv b/lectures/enet_binomial.csv deleted file mode 100644 index d943fae18..000000000 --- a/lectures/enet_binomial.csv +++ /dev/null @@ -1,200 +0,0 @@ -1.00,0.46,-0.13,1.29,-0.47,-0.17 -0.00,0.96,0.40,0.98,0.02,-1.02 -0.00,0.23,0.84,-1.13,-0.81,0.36 -0.00,0.21,-0.76,-0.77,1.75,0.08 -1.00,-0.72,0.40,-0.91,0.14,-2.37 -0.00,0.89,-1.77,-0.30,-0.37,-0.55 -0.00,-0.97,-2.21,2.48,-0.47,-0.45 -0.00,1.08,-0.49,0.68,-1.17,1.13 -1.00,1.38,0.50,-0.85,-1.41,0.68 -1.00,-2.10,-0.47,-0.25,0.67,-0.31 -1.00,0.90,-0.55,0.54,2.67,0.64 -1.00,-1.07,0.41,-0.15,-0.21,1.29 -1.00,0.19,0.07,1.37,0.87,-0.73 -0.00,0.88,-0.24,0.34,-1.00,-1.67 -0.00,-0.62,-1.15,0.43,-0.82,-0.30 -1.00,-0.53,-1.55,-0.18,1.81,-0.62 -0.00,1.06,-0.29,0.32,1.30,-0.48 -0.00,0.41,1.04,0.38,0.55,0.59 -0.00,1.05,-0.54,-0.93,0.74,1.39 -1.00,-1.02,-1.56,1.26,2.80,1.13 -1.00,-1.44,1.14,2.23,2.70,-0.05 -0.00,0.48,-1.30,-0.23,0.15,-0.64 -0.00,0.03,1.76,0.27,1.83,1.00 -1.00,-1.32,0.98,0.64,0.02,0.97 
-0.00,0.06,-1.63,-0.34,-1.25,-0.19 -0.00,0.16,0.35,-0.14,-0.30,1.44 -0.00,-1.63,0.80,0.55,-1.19,0.57 -1.00,1.64,0.64,1.46,-0.38,-1.41 -0.00,0.32,-1.84,-1.41,1.86,-0.48 -1.00,-0.43,0.47,0.38,-1.37,0.96 -1.00,-0.39,-0.28,0.04,0.16,-1.92 -0.00,-0.59,-0.21,0.66,-0.22,1.02 -0.00,0.47,0.41,-0.51,0.80,0.30 -0.00,-0.08,-1.02,-0.82,0.02,0.04 -0.00,0.03,-0.86,2.13,-0.62,-0.73 -0.00,-0.23,0.61,0.64,0.20,0.57 -1.00,1.33,0.18,-1.67,0.85,2.17 -1.00,1.13,2.11,0.29,-0.77,0.45 -0.00,-0.73,-0.59,-1.04,0.49,0.58 -0.00,-0.01,0.31,-0.03,0.09,0.11 -0.00,-0.74,0.20,-0.11,1.15,-1.63 -0.00,-1.00,-0.95,-0.68,0.08,1.42 -1.00,0.45,0.74,1.22,0.28,-0.34 -1.00,-0.39,-0.46,-0.12,-1.05,0.22 -0.00,-1.84,-0.30,-1.41,0.91,-1.30 -0.00,-0.15,-0.42,-1.13,-0.96,-0.95 -1.00,0.14,-1.54,0.31,-0.04,1.79 -0.00,-0.45,-1.58,0.90,-1.01,-0.75 -1.00,-0.10,0.09,-1.28,0.68,0.81 -0.00,-0.82,-0.61,1.07,-0.28,0.64 -0.00,-0.20,-0.46,0.72,-1.13,2.20 -1.00,-0.82,-0.77,-0.95,-1.42,0.69 -0.00,-1.73,-0.14,0.06,0.34,-2.60 -0.00,-0.23,-0.74,0.25,0.45,0.55 -1.00,-0.35,1.14,-0.49,1.18,0.09 -0.00,1.77,-0.37,0.99,-1.19,-0.16 -1.00,-0.09,2.05,-1.34,1.45,-0.08 -0.00,-1.55,-0.17,-0.38,-0.43,-1.25 -0.00,-0.22,-0.32,-0.03,0.51,0.02 -0.00,-1.33,-0.89,-0.01,0.01,0.76 -1.00,3.01,-0.69,0.05,-0.70,2.67 -1.00,0.32,2.45,1.08,-1.05,-0.66 -0.00,-0.23,-1.99,0.13,0.55,1.62 -0.00,-1.49,0.03,0.78,-1.80,0.85 -0.00,-0.36,-2.44,-0.23,1.04,1.70 -0.00,1.57,-0.80,0.17,0.08,-0.41 -1.00,-0.15,0.57,1.40,-1.18,-0.32 -1.00,1.15,-1.19,-0.32,-0.58,-0.27 -0.00,-0.01,0.35,-0.48,-0.69,-1.49 -0.00,0.25,1.09,0.08,-1.04,-1.35 -0.00,1.27,1.49,-0.26,-0.31,-1.74 -1.00,-1.08,0.22,-1.60,-0.63,-1.29 -1.00,0.79,-0.23,-0.30,0.14,0.47 -1.00,-1.87,1.21,-0.93,-0.14,1.13 -0.00,-0.20,-1.18,-0.85,0.34,0.36 -1.00,-0.07,0.11,-0.10,-0.13,-1.61 -0.00,-0.54,-0.89,0.91,0.62,1.57 -0.00,1.74,-0.54,1.10,-0.81,-0.93 -1.00,0.82,-0.22,-1.17,-0.70,-0.64 -1.00,0.70,0.33,0.25,-0.79,1.92 -0.00,0.44,1.09,0.15,0.33,-0.36 -1.00,0.05,-0.62,0.02,-0.19,-0.78 -1.00,-0.36,0.87,-0.13,0.73,-0.74 
-1.00,0.27,0.02,-0.51,0.76,-0.15 -1.00,1.09,-0.47,-0.37,-0.47,-0.77 -0.00,-0.92,0.22,0.12,-1.88,0.12 -1.00,1.60,-1.32,0.27,-1.19,-0.57 -0.00,-0.03,0.00,-1.01,0.91,1.00 -1.00,0.01,0.81,0.03,0.51,-2.15 -0.00,1.40,-0.13,-0.46,-0.62,-0.78 -1.00,1.21,-0.89,0.72,0.24,0.96 -1.00,0.43,0.26,1.14,0.67,-0.17 -0.00,-0.94,0.83,-1.01,-0.22,-0.20 -1.00,0.30,0.88,-1.13,1.31,1.41 -0.00,-0.15,-1.34,1.41,0.36,0.14 -0.00,0.79,-1.00,0.33,-1.23,-0.72 -1.00,-0.44,-1.05,1.30,-1.39,0.27 -0.00,-0.56,0.62,-0.63,-0.40,-1.83 -1.00,1.73,-0.04,-0.99,-0.87,-0.26 -1.00,-2.85,-0.19,1.13,-0.87,0.66 -0.00,1.46,-0.76,-1.04,1.75,0.83 -1.00,-0.16,-0.98,0.12,0.71,0.78 -0.00,1.52,-1.04,0.23,-1.45,0.26 -0.00,0.19,-0.38,-1.34,-0.01,3.35 -0.00,-1.08,-1.65,-0.45,-0.67,-0.46 -0.00,0.33,-0.79,0.15,1.00,-0.53 -1.00,-0.40,1.48,-0.60,-0.33,2.67 -0.00,-0.22,-0.60,1.22,-1.88,0.34 -1.00,1.30,-1.54,0.41,-0.31,-0.85 -1.00,1.81,0.91,-0.85,-1.61,-2.00 -1.00,-1.64,-0.58,1.46,0.34,0.93 -1.00,1.82,0.47,-1.62,1.76,1.62 -1.00,-0.09,-1.69,1.76,-0.77,-0.38 -0.00,0.38,-1.07,-0.67,1.24,-0.78 -0.00,0.55,-0.52,-0.86,0.87,-1.39 -1.00,-0.78,-0.54,0.26,-0.28,1.86 -1.00,-0.35,-0.13,0.52,2.25,1.77 -0.00,-2.27,-1.99,1.03,1.37,0.20 -0.00,-0.72,0.30,0.54,0.03,0.02 -1.00,2.49,-0.48,-1.96,-1.33,-0.83 -1.00,-0.56,-0.41,1.20,-0.61,-0.44 -0.00,1.09,-0.98,0.56,0.47,0.91 -0.00,-0.09,-1.04,-0.91,0.49,-0.38 -0.00,-0.47,1.19,-0.41,-0.94,0.16 -1.00,-0.77,-0.70,0.56,-1.31,0.84 -1.00,1.26,1.24,1.00,0.39,0.99 -1.00,-0.92,0.21,0.97,-1.27,-0.76 -0.00,0.10,0.37,0.01,0.01,-0.38 -1.00,-1.18,-0.80,0.09,0.78,-0.60 -1.00,0.68,-0.70,2.26,-1.15,0.50 -1.00,0.34,0.90,1.99,1.43,-0.49 -0.00,1.08,-0.75,1.00,-1.18,-1.04 -1.00,-0.20,0.87,1.41,0.12,0.04 -0.00,-0.81,-0.08,-0.34,-0.15,-0.66 -1.00,0.33,-0.28,0.46,-0.54,-2.59 -0.00,0.46,1.45,-0.40,1.16,-0.40 -0.00,1.46,1.08,-0.81,0.52,0.22 -1.00,-0.55,-0.08,-1.52,-0.98,1.20 -1.00,-1.39,-0.14,1.05,2.06,-1.49 -0.00,0.19,-1.07,-0.09,1.19,-0.21 -0.00,1.82,1.69,-0.35,0.61,0.05 -0.00,-0.42,-2.00,-0.18,-0.25,-0.84 
-1.00,1.09,-0.17,1.30,1.19,0.37 -1.00,-0.01,0.60,-0.46,-1.40,2.05 -0.00,-1.77,-1.09,0.54,-0.40,0.07 -0.00,-0.97,0.30,1.06,-0.65,1.01 -0.00,0.13,-0.92,0.47,1.55,0.14 -1.00,0.66,-0.43,-1.46,0.14,-0.57 -0.00,0.77,2.47,-0.18,-0.63,-1.01 -0.00,1.29,1.07,-0.02,-0.33,0.09 -1.00,0.71,0.45,-0.19,0.76,-0.25 -0.00,0.68,0.27,0.29,0.52,0.30 -1.00,-0.05,-0.81,1.63,-1.04,1.09 -1.00,1.26,-0.09,0.02,1.18,0.27 -1.00,-0.11,1.90,1.12,1.27,0.02 -0.00,0.30,1.55,-1.99,-0.08,-0.58 -1.00,-1.11,1.76,0.78,0.17,-1.04 -1.00,-2.16,0.04,-0.38,-0.78,-0.13 -0.00,-0.77,1.66,0.94,0.98,-0.12 -0.00,-1.49,0.86,0.25,0.62,0.24 -1.00,-0.69,-0.81,-0.95,0.25,1.49 -0.00,-0.85,0.57,0.70,0.21,-0.73 -1.00,1.42,1.42,-1.16,1.46,0.37 -0.00,-0.48,-0.76,-0.10,1.20,0.65 -0.00,1.62,0.40,1.38,0.32,-0.61 -0.00,-0.08,0.74,-0.01,-0.46,0.79 -0.00,-1.33,1.05,-2.14,-1.26,0.45 -1.00,0.43,-0.49,-1.63,1.94,-0.41 -1.00,0.05,0.15,0.32,-0.22,-1.02 -0.00,1.11,-0.55,0.25,-0.70,-0.80 -1.00,1.67,-1.24,0.95,0.13,-0.16 -1.00,-1.51,-0.99,-0.77,2.36,1.66 -1.00,-1.16,1.29,0.35,1.18,0.16 -0.00,0.31,-1.03,-0.23,-0.10,0.17 -1.00,0.45,0.13,-0.67,-1.20,1.07 -1.00,-0.26,-0.09,-0.46,-0.10,0.05 -1.00,-0.18,-0.39,-0.05,0.34,0.29 -0.00,-0.28,0.71,0.43,0.63,-1.07 -0.00,-0.93,1.54,0.53,1.57,0.31 -1.00,-0.20,1.57,0.67,-0.14,-1.16 -1.00,0.69,0.15,0.01,0.36,0.12 -1.00,1.66,-0.34,0.96,0.11,-0.31 -1.00,0.90,0.23,-1.77,1.44,0.23 -1.00,-0.29,-0.41,-0.65,1.51,-0.97 -1.00,-0.35,0.17,-0.45,-0.98,-0.87 -0.00,-0.81,-0.77,-0.74,0.16,0.18 -0.00,0.13,-2.27,0.26,0.39,-0.37 -1.00,-0.20,-1.17,-0.91,1.83,0.87 -1.00,-1.24,0.43,0.14,-0.66,0.02 -0.00,-0.97,-1.10,0.14,-2.43,-0.20 -0.00,-0.40,1.28,-0.07,1.76,0.16 -0.00,-0.59,-1.22,-0.11,-0.56,0.20 -0.00,1.41,0.32,-0.83,0.34,-1.51 -1.00,2.10,0.59,0.03,-1.88,-1.52 -1.00,0.21,0.84,1.79,0.55,0.20 -0.00,-0.16,-0.07,0.99,-1.83,-1.28 -1.00,1.70,0.90,-0.20,1.13,-0.67 -0.00,1.34,0.10,-0.44,0.21,0.97 -1.00,1.59,-0.49,1.58,0.06,1.47 -0.00,-1.27,-2.48,-0.23,-0.64,-1.84 diff --git a/lectures/enet_poisson.csv b/lectures/enet_poisson.csv 
deleted file mode 100644 index 27ad0139b..000000000 --- a/lectures/enet_poisson.csv +++ /dev/null @@ -1,200 +0,0 @@ -1.00,0.19,-1.29,0.07,-2.64,1.20 -0.00,0.78,0.02,-0.46,-0.29,-2.26 -0.00,-1.58,2.16,0.31,-0.18,-1.72 -0.00,-0.54,-2.32,-0.48,1.03,-0.09 -1.00,-0.51,1.89,-0.07,0.67,0.57 -1.00,1.05,-1.20,-0.28,1.48,-0.89 -0.00,-2.48,-0.87,-1.05,-0.68,-1.58 -1.00,-1.36,-0.53,-0.67,0.37,-1.10 -3.00,-1.34,1.21,1.65,1.29,-1.42 -2.00,-0.67,-0.49,2.41,-0.96,-0.83 -0.00,0.35,-1.14,-0.46,-0.08,-1.09 -1.00,0.94,-1.71,1.38,-0.67,0.27 -1.00,-0.16,-1.08,1.04,-1.58,0.45 -1.00,0.68,0.48,-0.83,-0.48,1.09 -0.00,-1.11,-0.10,-0.19,-0.37,-2.06 -0.00,-0.79,-0.48,-0.73,-1.69,0.69 -1.00,-1.07,-0.48,-0.77,1.61,-0.58 -0.00,1.76,0.42,-1.21,-0.03,0.32 -1.00,-1.39,0.48,0.27,0.15,-0.10 -0.00,0.53,0.55,0.77,-0.22,-0.29 -0.00,-0.89,0.94,0.59,-0.23,-1.17 -0.00,-0.29,0.04,0.07,0.32,-0.04 -1.00,-1.20,-0.48,0.17,-1.80,1.33 -2.00,-0.04,1.63,0.20,-0.89,-1.21 -2.00,-0.25,-1.22,0.31,0.61,1.03 -0.00,0.00,0.31,1.19,0.24,-1.37 -1.00,-0.60,0.19,-1.55,0.08,-0.41 -0.00,-1.13,0.08,0.73,-0.81,-1.36 -1.00,-0.44,1.56,-0.55,-1.33,-1.27 -1.00,0.12,-0.44,1.64,0.54,-0.51 -0.00,-0.82,-0.40,-1.29,-1.15,-0.75 -3.00,-0.18,0.12,1.23,1.16,-0.38 -0.00,-0.44,-0.02,-1.71,-1.52,-1.01 -1.00,-0.81,-1.17,-0.53,1.00,-0.48 -1.00,-0.48,1.09,-0.73,0.53,0.78 -3.00,0.30,-0.41,1.63,0.14,0.88 -0.00,1.76,-0.50,0.81,-2.67,-1.31 -3.00,-1.82,2.02,1.00,0.02,1.96 -1.00,0.90,-1.35,-1.40,0.35,-2.39 -0.00,-0.39,-0.12,-0.39,0.37,-0.25 -1.00,1.93,-0.02,0.99,1.11,-1.11 -2.00,0.43,0.86,-0.65,2.61,-1.01 -1.00,-0.92,0.32,-0.28,-0.52,-0.40 -0.00,0.81,0.26,-0.32,0.05,-1.44 -0.00,2.15,0.08,-1.06,0.20,-1.01 -2.00,2.07,-0.59,0.70,-0.73,-0.21 -2.00,1.21,0.84,-0.63,0.12,0.26 -3.00,0.07,1.05,-0.71,0.21,-0.47 -1.00,0.75,-2.16,-1.62,-0.21,-2.59 -0.00,-0.49,0.51,1.60,-0.25,-0.78 -2.00,0.45,-0.18,-0.44,0.04,0.23 -0.00,0.84,-2.40,0.91,-1.12,-1.88 -0.00,0.08,-0.99,-2.44,1.03,-1.23 -2.00,-0.22,0.33,-1.57,0.78,0.73 -0.00,-0.34,1.34,0.92,-0.35,0.55 
-2.00,0.62,0.91,-0.31,1.87,-0.04 -1.00,0.83,0.85,0.08,-0.16,0.68 -2.00,0.39,-0.76,0.62,0.10,0.06 -2.00,2.30,0.29,-0.73,0.14,-1.38 -0.00,-0.10,-0.06,-1.66,0.73,-0.14 -0.00,0.71,-0.06,-1.23,0.45,0.13 -0.00,-0.53,-0.23,-0.54,0.37,-0.54 -0.00,0.01,-0.32,-0.14,-1.93,0.83 -0.00,-1.78,0.90,-1.25,-1.34,-0.75 -1.00,-0.73,0.04,1.36,0.45,0.38 -0.00,-0.84,-0.36,-0.28,2.21,0.01 -0.00,-0.12,1.54,-1.89,-0.81,-0.45 -1.00,-1.36,1.31,-0.05,0.08,-0.75 -1.00,-1.26,-1.82,-0.05,-1.28,-0.37 -0.00,0.77,0.89,-0.11,1.17,1.10 -1.00,0.22,0.85,-0.43,-0.96,0.50 -0.00,0.51,0.26,-1.28,-0.85,2.98 -2.00,0.74,0.19,0.53,-1.21,-0.07 -2.00,1.41,-0.50,-0.17,2.14,-1.84 -0.00,-0.01,-0.34,0.35,-0.58,0.64 -1.00,-0.93,0.30,-0.59,0.34,-1.34 -0.00,-0.27,-1.10,0.41,0.12,1.11 -1.00,-0.19,1.03,1.03,1.09,-2.01 -0.00,0.09,-0.02,0.60,0.69,-1.07 -0.00,-0.48,-0.09,-0.39,0.19,-0.28 -0.00,0.03,-0.66,0.65,-1.85,-1.19 -3.00,-0.68,1.02,1.21,0.87,-0.02 -0.00,0.13,-1.03,-0.45,-0.17,-0.92 -0.00,-0.63,0.03,-1.05,0.83,0.70 -1.00,0.56,0.14,-0.31,2.02,0.50 -0.00,-0.06,-0.44,1.05,-1.04,-1.00 -2.00,-2.93,1.73,-1.79,1.59,0.08 -1.00,0.17,0.42,-1.31,2.19,-1.60 -1.00,0.07,-0.16,-0.12,-0.44,-0.37 -0.00,0.28,0.60,0.02,-0.04,-0.86 -2.00,-1.56,-0.25,1.00,-0.13,0.59 -0.00,-1.22,0.24,-1.64,-0.75,-0.27 -1.00,2.10,-1.71,-0.63,-0.90,0.46 -2.00,-0.49,2.23,1.00,0.23,0.09 -2.00,-0.64,3.33,-0.40,0.51,0.16 -1.00,1.52,-0.17,0.30,-0.90,-0.83 -2.00,1.29,-1.89,0.41,0.12,0.56 -1.00,0.22,1.08,0.25,-0.85,-0.25 -0.00,0.31,-0.17,-0.20,0.40,-0.06 -1.00,0.29,-0.73,-2.33,0.58,0.42 -2.00,0.59,-0.27,0.07,-0.33,-0.57 -0.00,-1.59,0.93,0.42,-1.91,-0.48 -2.00,-0.15,0.34,-0.91,-0.91,1.83 -1.00,1.88,-0.09,-1.47,0.69,0.30 -0.00,1.56,-2.15,-0.13,-0.66,-0.21 -0.00,0.29,1.93,-0.31,-0.95,-1.23 -1.00,0.24,1.16,0.35,-0.22,1.00 -1.00,0.72,2.22,0.79,0.01,-0.24 -3.00,0.30,1.15,0.67,0.73,1.53 -0.00,-2.42,0.45,-0.63,-3.36,0.19 -1.00,0.39,0.53,-1.37,0.14,0.07 -1.00,-1.96,0.80,1.34,0.94,-0.46 -0.00,0.45,1.15,1.82,-0.82,-0.07 -1.00,0.01,1.80,-1.56,1.42,-0.58 
-4.00,-0.42,0.95,-0.13,-0.54,1.29 -1.00,1.02,-1.19,-0.59,-0.39,0.13 -1.00,-0.28,0.13,0.17,0.54,-0.17 -3.00,0.33,0.19,0.58,-1.40,-0.64 -0.00,-1.80,2.59,-1.06,0.99,1.27 -0.00,-1.63,-1.26,-0.67,0.03,-1.31 -1.00,0.81,-0.03,-1.63,-0.10,-0.09 -0.00,0.37,-0.95,-1.88,0.44,-0.58 -1.00,-0.08,-2.13,-0.48,1.84,-0.21 -3.00,0.10,-0.62,1.05,-0.43,0.43 -1.00,0.08,-0.79,0.19,-1.91,-0.73 -2.00,-0.32,0.93,-1.42,0.25,0.42 -0.00,1.02,0.72,-0.97,0.26,-2.89 -1.00,-1.60,0.18,-0.32,0.60,-0.71 -0.00,-1.80,1.92,-0.32,-0.17,1.54 -1.00,-0.13,2.33,-0.10,-0.03,0.87 -1.00,-0.38,1.09,1.65,-0.48,-1.06 -2.00,-1.45,-0.19,1.51,-1.30,0.95 -1.00,0.93,-1.83,-0.75,1.29,-1.09 -1.00,-0.24,0.75,-0.94,-1.03,0.04 -1.00,0.15,-1.06,0.32,-0.43,-1.10 -1.00,0.76,-0.49,0.65,1.62,-1.81 -1.00,1.07,-0.26,0.96,-2.02,0.57 -2.00,-0.05,-0.24,1.08,0.61,0.10 -0.00,0.62,-0.80,1.15,1.08,0.41 -1.00,-1.75,-1.60,1.10,-1.41,-3.07 -5.00,0.39,0.04,-1.42,0.36,2.00 -0.00,-0.03,-0.53,-0.71,-0.32,1.14 -1.00,0.52,0.80,-0.97,-1.50,-0.53 -1.00,2.13,-2.04,0.25,0.59,-1.84 -1.00,-0.10,-1.13,0.79,-0.38,0.65 -1.00,0.34,-0.06,-0.89,0.16,-1.86 -0.00,0.07,-1.35,-2.24,-0.21,-0.06 -0.00,1.94,-0.53,0.34,0.29,0.37 -1.00,-0.29,0.50,1.08,-1.00,0.48 -0.00,-0.45,1.04,-0.46,-0.78,-2.26 -1.00,0.61,0.24,2.04,-0.29,1.23 -0.00,1.61,0.77,-0.71,0.04,-1.81 -1.00,1.65,-0.05,0.11,-1.94,-0.91 -3.00,0.11,0.09,-0.74,0.74,1.81 -1.00,-1.17,-0.91,1.10,1.19,0.53 -3.00,1.46,0.37,2.49,-1.06,0.18 -3.00,1.69,0.89,0.09,0.35,-0.26 -0.00,0.85,-0.59,-1.19,-0.29,-0.39 -1.00,-1.43,0.01,-0.71,0.88,-0.98 -0.00,0.58,0.85,-0.68,-1.23,-0.61 -1.00,-0.35,-1.19,-0.11,-0.23,3.35 -0.00,-1.78,-0.35,0.44,-0.12,0.36 -0.00,-0.52,0.60,0.66,0.28,-0.67 -1.00,-0.63,1.34,-1.16,0.67,0.74 -3.00,-0.44,1.29,0.77,0.51,0.42 -0.00,-3.13,-0.32,-1.00,0.31,-0.85 -0.00,-1.13,0.30,0.93,-1.11,-1.46 -2.00,1.13,-0.61,-1.11,-0.31,1.20 -1.00,-1.71,0.69,-0.88,-0.73,-0.12 -1.00,-0.09,2.04,-0.77,-0.00,0.07 -0.00,-0.03,1.15,0.30,1.33,0.38 -0.00,-1.12,0.21,-1.95,0.07,0.22 -0.00,-0.69,-0.46,-0.90,-0.00,1.34 
-0.00,-0.47,0.28,1.22,0.15,-1.31 -0.00,-0.18,-0.09,0.57,-0.37,-1.41 -2.00,-2.29,1.24,1.69,-0.06,1.71 -0.00,0.28,-1.66,1.06,-0.59,-1.19 -3.00,0.34,2.06,-0.25,1.11,0.73 -1.00,1.49,-0.12,2.39,0.82,-1.39 -4.00,0.97,-0.86,-1.07,0.82,-0.73 -0.00,-1.57,0.77,0.70,0.23,-1.38 -3.00,0.82,1.46,0.41,1.04,0.22 -2.00,0.96,-1.64,-1.18,1.48,-0.40 -0.00,0.66,0.34,0.87,-1.01,-0.12 -1.00,-0.27,-1.33,0.31,-0.30,0.43 -2.00,1.15,0.24,1.47,0.38,-0.66 -2.00,-1.06,1.30,-0.52,0.20,-0.38 -1.00,-1.98,0.65,-1.93,0.78,0.79 -2.00,0.33,0.05,1.02,1.01,-1.77 -3.00,1.61,1.40,2.53,-0.18,1.15 -1.00,0.52,-0.70,1.20,0.32,-1.74 -2.00,0.05,-0.87,0.69,-0.12,-0.11 -3.00,2.26,0.95,-0.42,0.50,1.57 -1.00,-1.87,-1.71,-0.20,0.64,1.12 -2.00,1.01,-0.68,1.59,0.92,2.70 -1.00,0.67,-0.13,-0.91,-0.13,-1.62 -6.00,0.82,1.68,0.93,1.09,1.76 -1.00,-0.93,-0.50,0.92,-0.40,-1.18 -1.00,-0.68,-0.24,-0.91,1.33,0.22 -2.00,2.13,0.88,-0.42,-0.88,1.70 diff --git a/lectures/lasso_data.csv b/lectures/lasso_data.csv deleted file mode 100644 index 12e133f53..000000000 --- a/lectures/lasso_data.csv +++ /dev/null @@ -1,300 +0,0 @@ --178,3,59,-132,-32,-126 -303,17,-241,12,-81,41 --54,159,2,5,-68,-150 -395,122,5,34,135,215 --52,101,29,17,100,60 -255,-14,-146,-60,46,29 --21,192,-131,51,-181,-111 -278,49,-69,69,-51,93 --38,25,-48,-115,-5,30 --191,101,75,-109,-48,82 --373,-179,15,-76,56,-86 -123,-68,-78,-115,-26,-85 --32,-102,84,42,45,41 --78,-32,48,74,-57,34 -225,4,-115,-107,125,130 --162,29,22,24,-31,144 -32,88,-32,67,-65,-93 -28,-50,-80,-43,234,-107 -35,-244,132,-31,47,154 -1,62,61,92,-58,13 --427,-176,67,-152,-82,-94 --54,-52,3,-54,-57,16 -185,-46,-173,95,114,-50 --111,-49,19,25,-46,-129 --212,-131,-91,-37,-87,-93 -352,-12,-15,106,-14,146 -76,32,-78,65,115,60 -34,28,-35,-22,73,11 -243,58,-189,-101,112,230 -244,-37,-241,-9,166,16 -10,122,-112,-31,-136,-32 -170,-128,-165,106,18,-61 -38,54,-104,-53,32,17 --143,-142,74,-119,-140,3 -68,-118,-114,68,109,43 -168,-104,-215,7,83,65 --80,16,-115,-74,-44,14 -432,-103,-317,-34,-87,35 
--173,-50,-40,-74,211,-73 --156,-102,39,-60,-8,-12 --306,143,43,19,41,-122 -90,28,-6,-31,-20,62 -11,140,-101,73,-67,-63 -30,145,-39,75,93,-46 -36,-20,26,-76,-88,86 --398,-194,234,9,48,-37 -137,-71,-53,-128,-41,111 -28,43,-192,-97,-249,-4 --165,-40,114,63,-33,-5 -38,71,58,73,34,9 -174,-15,69,65,-88,15 --12,8,122,153,126,-61 -50,-61,-74,-72,38,18 --520,9,277,-157,42,-19 --24,-79,97,-34,7,29 -42,-83,-64,83,74,-44 --186,92,35,-123,29,-85 --90,-124,-149,107,31,-228 --228,-165,5,-189,134,5 -99,-23,-92,97,102,-44 -145,108,105,74,58,70 -16,54,48,157,4,-170 --162,9,9,-45,39,-6 --96,-87,82,-61,69,71 -51,23,-114,-163,172,112 -117,-129,-27,22,-40,-107 --140,24,81,253,-26,-186 -236,98,82,154,-71,90 --500,-16,118,-154,-104,-34 --368,1,155,-217,339,-71 --325,155,-126,-244,65,-70 --96,-112,25,67,29,-171 -376,-87,-237,-22,6,40 -147,-46,103,21,151,42 -236,-107,-177,60,-131,9 -102,-87,-50,58,-99,-128 -408,-31,-119,163,96,82 -73,19,-56,109,-13,-82 -99,32,24,62,-110,31 --183,38,59,12,-82,4 -40,-37,135,99,-3,-4 --224,213,54,-99,-133,104 -59,-111,-18,-111,-68,96 --68,8,-63,-48,226,-206 --86,-62,141,117,-24,-55 --370,143,116,50,281,-211 --333,-14,-45,-203,47,-74 -62,-29,-82,-82,-199,102 --260,0,-72,-71,-76,-181 --165,152,110,62,-6,-56 --101,-13,40,17,123,-65 -193,234,-26,0,-149,60 -112,-74,-59,146,116,25 -36,26,111,92,-117,62 --106,-33,85,61,83,-200 -10,-162,-23,-34,89,30 -92,162,-158,-24,46,-58 --200,117,55,-109,-16,-7 -154,-94,-110,38,-74,-62 --115,150,-48,-117,-77,-126 -124,32,-94,13,11,77 -97,-118,-30,36,-44,87 --42,-31,14,-64,-323,84 -133,12,54,113,-154,47 -270,-87,-155,97,241,143 -18,-134,-41,-64,-20,-92 --149,-98,13,-39,78,35 -66,-73,-194,10,-167,-190 -268,85,-171,-33,1,114 -153,6,13,155,-138,40 --163,31,-52,-49,26,18 --232,-53,112,11,-131,-146 --87,151,4,91,38,-52 -38,2,-101,-7,29,35 -1,235,131,64,50,42 -9,64,123,195,-78,-10 -242,-48,-63,-10,189,9 --234,25,38,29,89,-66 -380,159,-167,218,130,-125 --369,-169,215,77,-13,-205 -220,92,-109,-9,-351,126 -109,-113,-28,-35,-40,76 
--10,46,71,11,-81,-68 -191,-85,-24,96,66,64 -6,-2,72,51,-18,95 -158,153,14,233,88,23 --113,28,1,-94,254,-58 -97,67,13,16,-66,121 -11,55,60,99,117,-87 -37,169,-47,-44,-14,30 -36,23,-141,-180,-110,11 --380,138,149,10,-117,-160 --93,-115,-17,102,-91,-59 --160,108,101,-45,-36,8 --93,-109,-1,27,17,-72 -32,60,44,-42,100,-57 --266,110,173,-44,-81,-28 --220,-43,47,-0,-59,-21 --52,51,-4,36,43,47 -104,-75,27,3,24,67 --184,-125,-44,-48,43,-91 -504,-159,-118,30,8,233 --320,-12,-35,-106,146,-145 -55,-63,-176,-59,43,-59 --47,-134,91,70,117,46 -104,-28,39,14,11,102 -110,-255,52,35,-91,49 --333,-179,45,55,-29,-161 --304,-64,28,-87,122,-161 --15,30,-35,84,188,-170 --122,42,-29,38,160,-99 -295,-100,-51,100,-75,-10 --281,-65,-45,11,109,-45 --15,82,56,97,90,103 -80,-32,113,163,3,92 -91,37,-58,4,-18,26 -192,-73,-36,-2,-90,130 -84,-186,50,-3,-42,-6 -28,-109,-151,84,-228,-90 --281,101,57,-88,-176,3 -118,-103,154,321,-16,-8 --314,29,135,-135,17,-16 -11,100,187,14,17,123 --3,-108,54,14,146,42 -365,-7,-84,187,-174,-6 -374,-113,-188,21,61,33 --67,18,-30,-29,7,12 -69,-181,-51,-21,11,25 -62,-95,-135,125,-99,-133 -448,209,-60,58,-69,100 -203,25,-136,64,-20,-103 --346,-152,153,33,7,-80 --3,-17,3,-47,-43,-44 --32,-156,-3,72,-133,27 --11,57,23,-14,43,98 -281,31,-50,-13,-89,186 --233,-180,55,-82,61,-98 -225,-79,16,243,8,37 -25,-80,28,133,-57,-228 -86,20,-37,-38,-76,28 --184,-113,33,-62,54,21 --39,-31,-42,61,-27,-80 -243,-46,-159,-37,181,146 -143,-5,9,11,-133,111 --131,-36,-8,-26,-106,-108 -252,-53,-55,121,-72,13 -20,135,-14,-187,-54,12 --428,-63,-75,-183,-45,-144 -266,-105,-2,30,37,125 --346,169,156,101,-130,-355 -233,-185,-88,122,67,18 -331,-81,-20,158,-102,-134 -93,54,23,89,115,-31 --229,-62,65,-96,135,-81 --115,-133,64,-79,89,107 -309,-120,-52,84,37,91 --221,-24,70,-193,152,288 --305,36,88,-52,10,-56 --137,78,77,1,-32,6 -149,-68,-27,136,54,-40 --13,-213,73,12,-7,116 -152,-102,96,75,45,-20 -38,220,-38,-46,-120,39 -138,67,-31,-41,55,36 --331,49,125,-94,-105,-60 --2,-97,-85,81,-11,-48 
-130,-50,35,112,74,-15 --87,62,76,-14,127,-87 -23,-121,-8,72,-45,-48 -43,-81,48,42,-53,74 -125,-76,-81,58,19,62 -23,14,137,86,-186,131 -67,35,-87,-39,16,50 --274,61,137,71,17,-146 --126,-76,88,16,-33,-69 --284,-48,32,18,73,-89 --189,34,201,-116,-17,38 -280,47,-21,255,10,59 --142,-98,21,-30,-108,-37 --229,-14,153,26,-47,-108 -8,-110,-40,-30,-54,-8 --34,-95,-79,30,134,-51 --163,-128,67,25,-17,-132 -186,-100,-25,60,-29,206 -129,-33,-128,97,-100,-39 --63,-68,30,6,0,134 --305,-85,58,-136,162,134 --117,-77,-77,-38,127,-53 -257,31,25,166,-55,30 -23,-5,71,21,28,-27 --83,-117,-65,-4,-238,-50 --11,116,157,60,-69,-34 --70,77,133,-69,-91,128 -122,145,28,-88,20,193 --241,-72,-25,14,-50,-57 -90,156,63,-7,135,84 -200,63,-63,6,-46,123 --337,131,185,-8,44,-91 -380,-37,-47,205,-59,37 -77,13,90,176,137,32 -23,-38,-36,13,242,34 --65,97,82,-48,76,134 -7,-18,143,48,-17,79 --263,11,228,-193,90,37 --86,-84,55,-65,51,-129 --295,79,78,-119,51,-85 --139,-75,121,-91,58,43 -29,226,112,154,-142,-136 --32,175,-55,-115,91,-85 --130,-50,158,64,-65,59 -44,-28,-45,-129,-221,20 --94,130,117,13,-16,201 -93,148,53,-52,84,197 -74,-26,-18,-20,16,64 --91,-56,-69,-144,81,27 --261,-122,21,-95,-29,-207 -333,61,-38,-51,113,234 -33,108,42,14,77,188 -306,12,-134,4,-20,98 --60,-81,131,77,86,-7 --136,97,35,23,-73,-55 -127,129,-107,21,170,-54 --236,-10,82,23,-98,-76 --185,-144,37,-56,-230,-129 -132,-98,-33,42,-12,30 --197,19,-49,-205,37,1 -240,-207,-26,-16,-171,253 -55,3,23,-43,-67,188 -121,-48,20,82,-13,-39 --59,203,112,-27,-26,125 --32,68,-27,15,-112,-59 -138,15,34,202,-123,14 -173,70,-80,27,79,-23 --72,-93,109,21,-44,116 --33,-165,119,-160,-197,54 --286,109,-12,-135,-20,-99 -168,-12,-0,51,-14,119 -245,47,-168,111,-3,34 --149,134,-41,-97,31,-103 --66,32,-35,-284,-14,157 --80,26,124,31,107,-45 --449,-50,54,14,-67,-39 -213,84,-193,112,-77,-55 --58,-37,-420,-54,168,-133 -84,4,-63,47,-75,-11 -56,-69,-96,-118,37,52 -228,-38,-25,54,135,69 --119,-1,70,136,152,-98 -274,-42,-132,85,-117,138 --73,-84,145,-81,22,156 
-142,-86,-13,117,-52,135 -97,91,-75,33,165,-81 -484,-43,-107,196,-104,125 --130,142,-13,-11,-109,-213 --230,41,-1,-131,-113,-56 -157,-77,134,198,-218,132 --268,-128,-127,-66,-65,-190 -159,-63,-45,118,139,-8 -21,-100,-128,-158,-61,-12 -48,41,-54,248,62,-185 diff --git a/lectures/y_arma_data.csv b/lectures/y_arma_data.csv deleted file mode 100644 index 475d23871..000000000 --- a/lectures/y_arma_data.csv +++ /dev/null @@ -1,251 +0,0 @@ -y_arma11,y_arma14,y_arma41,y_arma22,y_arma50,y_arma02,y_arma11c,y_arma14c,y_arma41c,y_arma22c,y_arma50c,y_arma02c --2.047076594847129505e-01,1.394072226185005814e+00,-7.618372133529949242e-01,-4.064523617925941412e-01,-9.835047173983663127e-01,6.143504692328910499e-01,4.259432631872511976e+00,6.738236941732065333e+00,3.921172305736134511e+00,4.841410872820988587e+00,3.567921955380871424e+00,5.020264181479198839e+00 -2.537649126243639586e-01,4.567375047627768581e-01,-1.169637832882710837e+00,-2.904655508294423649e-01,1.933152260847050830e-01,6.429869148939288959e-01,4.398428009976181663e+00,7.368314202772689114e+00,5.710735793018799633e+00,4.558666615338087702e+00,5.627433726992007834e+00,4.271942218168720551e+00 --1.614848622683232593e-01,-1.272326703442733020e+00,-2.554150415613765990e+00,4.377894222244522737e-01,-3.224625683687642463e-01,-5.829343493568055479e-01,3.894220064383869140e+00,6.410706525591642446e+00,6.653104231835761162e+00,4.369696549146111941e+00,5.441687121317324127e+00,3.719432040526521988e+00 --8.586475013185908001e-01,-9.458320913213407177e-01,-1.416810339850927303e+00,1.292133237669666368e+00,-2.434714935535151170e+00,3.225999093088858238e-01,4.254945463650283699e+00,6.774751472682821074e+00,5.813209651463687067e+00,4.848351385863497676e+00,3.693440719499076152e+00,5.799271295947738381e+00 
-1.127289339992149531e+00,-6.883717809906886309e-02,-2.004237998790744379e-01,1.802924788488173835e+00,-1.990610824223469688e+00,-4.553547958839088139e-02,4.888509710140729325e+00,4.026927273300911381e+00,5.506526563168436361e+00,4.708763021171975538e+00,2.747097130265434117e+00,5.199862796144467225e+00 -2.926896038343052453e+00,-8.584562919036571316e-01,-1.274069813257600048e+00,-3.157042843910953245e-01,-2.664447937057306537e-01,5.833794675456231982e-01,6.113454539016629496e+00,2.075139727032464076e+00,6.999617863858643929e+00,4.004972492948537521e+00,4.783782184444534735e+00,5.428226532398704229e+00 -2.775771947041553389e+00,-1.796823860810985218e+00,-3.435812286850086217e+00,3.328747540357390733e-02,1.713166700014398525e-01,1.287328621636036230e+00,5.768913584533245320e+00,5.364144882553992133e+00,6.220457380072277331e+00,3.279857874780836013e+00,4.382208504444793817e+00,5.760983622424916462e+00 -2.396092869971668371e+00,-1.374140342963602990e+00,-3.324410934220093150e+00,6.241378334643329362e-01,-1.008235022896845523e+00,-2.571874757040344406e-01,6.783227456396678789e+00,6.453556693243497122e+00,4.760031787665699454e+00,1.397159712575776425e+00,3.955895170740390210e+00,4.892141935490703197e+00 -2.664703373581160939e+00,-2.274295006054620760e+00,2.847437087200943573e-01,-1.677062469844210035e+00,1.071593491473754156e+00,5.943089576180543565e-02,8.764846998978599757e+00,3.134175247222279470e+00,4.618905799269997559e+00,3.832999293510881422e+00,4.169523377113049989e+00,4.820177423393641192e+00 -3.514120165236296778e+00,-1.695939107720390027e+00,5.749749943751869541e-01,-1.372251161366488947e+00,1.414936450756299369e+00,8.067203104089537558e-02,6.664487842459541511e+00,2.089710848527895237e+00,7.375027854593720456e+00,8.576140496669221847e+00,6.748639486536131216e+00,2.669498198821129265e+00 
-4.079031639245426888e+00,-1.383046694106316643e+00,-2.344893313337678364e+00,-6.250943658791197066e-01,1.328692743772108553e+00,1.289696408214198620e+00,5.123872851709446508e+00,4.754158166466891267e+00,7.226677542929837550e+00,7.811349700243606975e+00,6.399961038027319304e+00,2.505802874791816404e+00 -2.115568895475858469e+00,-1.285696195785905438e+00,-5.410382313438887536e+00,-9.582233749130679712e-01,-4.514660067096110119e-03,6.906817862356203763e-01,4.624298391542023268e+00,4.532322449938698483e+00,5.895034864771226069e+00,4.201755661206154180e+00,3.841171273307598000e+00,3.901458429730415567e+00 -1.407990916849725771e+00,-4.315387699941466426e-01,-2.434441803202209265e+00,-2.143241193463119032e-02,1.991430048897706306e-01,-3.245252085642316153e-01,3.298440951207899374e+00,3.023850958378810461e+00,6.011888360633887984e+00,2.048413650493551952e+00,5.931427026305052586e+00,5.352527421085418347e+00 -1.381153138273853553e+00,1.084288569595046825e+00,1.475497484267841219e+00,1.675123411059225242e+00,1.269439435330158350e+00,-1.045172753525086096e+00,3.103686242816811269e+00,4.080448938513448809e+00,6.668471493814188022e+00,3.319703108792884105e+00,7.769291575371815028e+00,3.920245059994244663e+00 -2.468901196498200612e+00,4.531380836694007819e-01,9.757983087292942947e-02,1.159459620823442894e+00,1.329155575330689176e+00,-1.830503306089610316e+00,3.658159191813463718e+00,6.346035248965537612e+00,7.077166516280438913e+00,4.554699042844648105e+00,5.492605970846351937e+00,4.318612795940402371e+00 -3.211626130273146806e+00,-2.190557335162672814e+00,-4.025770289739019070e+00,2.374106873340321999e-01,-1.204791600774974603e+00,-3.953349523769545404e-01,3.372537397371419665e+00,4.291890175742812730e+00,4.582735795002996326e+00,8.003629851130945383e+00,3.299960257085285065e+00,5.863216575797151719e+00 
-7.173325572515190096e-01,-1.440903136015097230e+00,-4.563025185706399256e+00,-7.098001117370973390e-01,-4.208258195771115151e-01,2.594451641987153057e+00,4.269616605460259784e+00,1.966875208797961960e+00,3.226094930911747038e+00,6.020287567355888214e+00,3.980251475991546606e+00,6.606646644611915420e+00 --5.344161775827541705e-01,6.575126326774392016e-01,-5.766210270892349055e-01,2.080044842860721221e+00,2.597569170595709132e+00,1.316246656649133762e-01,3.840933541237355975e+00,5.846674991729185145e+00,4.098018048564538773e+00,1.600830935090639340e+00,4.628502633768053265e+00,3.585462029244003368e+00 -1.138068288338715872e+00,5.410281705276032138e-02,2.214383099182643644e+00,1.981718727958543980e+00,1.324496153816917765e+00,-1.821106292140853622e+00,2.401856988014751781e+00,6.656277522679868497e+00,4.283437503067081487e+00,1.384202228703339088e+00,5.999512681027482586e+00,1.528291132932185992e+00 -9.991403387521696766e-01,-3.811617114262618733e-01,1.206231109078025332e-01,-2.586171993187720286e+00,-6.952960055817554164e-01,8.046298768301501925e-01,2.150521954723291884e+00,3.733293634828790175e+00,4.514158688834689848e+00,2.548048973349378521e+00,4.355070413414093622e+00,4.427199029031395838e+00 -5.611440100001430231e-02,5.262380750425947884e-01,-4.953603576685280174e+00,-4.320037329811370341e+00,-1.907598969450934878e-01,1.078018569695111006e+00,2.658257906111644253e+00,4.382200981413503449e+00,5.859367423091468297e+00,4.029777956216626933e+00,5.067531354897734985e+00,4.945705073361552628e+00 -3.301613052297283080e-01,1.746862891830231190e-01,-8.118789816136265713e+00,-3.003177430316123608e+00,7.712387043869173286e-01,-8.794142886128657466e-01,4.258293663630688286e+00,6.153065962604465255e+00,7.355119360103516968e+00,8.075010494827537855e+00,4.923466375553967822e+00,4.909788839978878272e+00 
-3.663509651997356009e+00,-2.874794400657883831e+00,-3.617695985036166295e+00,3.937863948442860362e+00,5.309231030866371492e-01,-1.234990868812363196e+00,4.339908625421151633e+00,5.340219971922238074e+00,5.515921408322084218e+00,7.798414613260440831e+00,6.873233673651917286e+00,5.826629709744591601e+00 -2.863535086443184330e+00,-2.579467235640812373e+00,9.915726649245656388e-01,7.298812062776931064e+00,3.223797297234991532e-01,2.727270606602266056e+00,4.237569677073842023e+00,3.860558543165812484e+00,4.509396332971866705e+00,4.004854402537130653e+00,5.452430701800734525e+00,4.871357265495228184e+00 -1.213134378267162194e+00,2.413579806889863677e-01,1.303675711696500095e+00,2.444639467618931139e+00,8.915073927774813667e-01,5.015301818356774355e-01,4.572111121969630432e+00,4.512712257921455361e+00,5.355957290748269983e+00,4.794247150280836145e+00,3.660108072753280872e+00,2.429556238521846101e+00 -8.319915033095108825e-01,-5.188516244935168498e-01,-5.687570288109577099e+00,-3.038003124026212198e+00,-8.719733907399778161e-01,-1.464298441207599888e+00,4.048062396070275071e+00,5.403303064625879060e+00,6.407665900443154428e+00,3.134788283354405625e+00,4.228899286686077730e+00,5.406063202984769944e+00 -9.700496358803372132e-01,-2.724024236513598485e+00,-7.053649446370279463e+00,-3.612813816796582422e+00,2.879464430993161628e-01,3.946224160478077891e-01,4.524770878655016482e+00,3.790844656041213234e+00,7.195887404329030623e+00,2.361511342912992362e+00,3.900621344854551253e+00,6.058011910665634048e+00 -1.357224041730048292e+00,-3.094356072793469803e+00,-1.008363536011465111e+00,1.046340672032861185e+00,2.368592569932105540e+00,-3.138197009761437006e-01,4.525455283146967211e+00,3.669994645124985411e+00,6.551577792201502604e+00,7.779296198399536344e+00,3.376143193000717524e+00,3.340663942774611428e+00 
-1.202178532925514798e+00,-1.850307710282078411e+00,3.886862348001593581e+00,1.589445207794040638e+00,1.217857516123329376e+00,-9.122994252851901464e-01,4.653375618486451870e+00,5.177928739455077256e+00,5.315906173013113190e+00,6.297915587978544671e+00,1.981820108745978359e+00,5.388309049864337119e+00 -2.245772790529634744e+00,-1.516478417599159689e+00,-3.673322539992259772e-01,-5.204825623456811323e-01,-1.025887496633735574e-01,2.428428862906994556e+00,4.180686233113284800e+00,3.392074251314660671e+00,5.699029251570543408e+00,5.296396093445561171e-01,4.542328457752607740e+00,6.241995262291350599e+00 -1.441119035565789019e+00,-1.826409292124326900e+00,-5.940822212463316099e+00,9.652051478386654004e-01,4.052444759094278592e-01,1.610732939294483357e+00,7.038160704560359804e+00,3.766346935888272185e+00,5.999371763645894973e+00,1.156864407980374487e+00,5.145335831238891267e+00,4.119984700183874970e+00 --5.465696330630986921e-05,-1.719338771172416624e+00,-6.508425086827772432e+00,2.181828702209119619e+00,1.108177098201627686e+00,-1.781879468348538165e+00,4.807427395222168443e+00,4.255280106544435448e+00,7.009375617216063503e+00,5.319221720194350134e+00,4.137737097068861836e+00,3.233320057309073015e+00 --2.661176385278094525e+00,-1.395987375772736572e+00,-1.270103392180482293e+00,1.110821262560730549e-01,-1.029079612725555704e+00,-1.728302907832516833e-01,3.457531085129489146e+00,7.444270232949445898e-02,4.591567366097629943e+00,5.417009442651861129e+00,3.131299035156449584e+00,3.515272342145693507e+00 --4.686224156394166229e+00,-1.850331606786910887e+00,2.184759340112722548e+00,-2.041081623865537598e+00,-1.542782696707952095e+00,4.958978337833580552e-01,3.116982008029290707e+00,1.386033864121762438e+00,3.652533172280717277e+00,4.691688663117650648e+00,4.512583138494862922e+00,4.507587864330768745e+00 
--5.026691791720129920e+00,-9.200819435921131495e-01,-3.295321237037073248e-01,9.793109676417566245e-01,7.543043591655398394e-01,-1.011195992545088629e-01,3.457098235710537537e+00,6.072136861732305491e+00,3.598198391522849704e+00,4.228970068897724310e+00,4.217642302531045218e+00,5.607026952466911851e+00 --3.511138640213175677e+00,-9.744145346579490274e-01,-5.043254897882221144e+00,6.789267989097658162e-01,3.161919450570618739e+00,-1.171798760336695056e+00,5.461214551316098564e+00,5.287352572723380106e+00,4.399439539341724270e+00,3.500205802814739364e+00,5.656071780409133609e+00,4.741463207851571049e+00 --3.703237619293762606e+00,-4.488315090217093406e+00,-4.263381366707646336e+00,-1.872655445832931420e+00,1.413418391066827429e-01,-5.997420261799208951e-01,6.633267216554529888e+00,2.943362263037925697e+00,4.342340090444274381e+00,4.751667085587318873e+00,4.529073913784968042e+00,3.139337629007510788e+00 --3.100678161902563179e+00,-2.501030560910059375e+00,3.785869496888828040e-02,-1.776633578233102195e+00,-6.457530033435663341e-01,3.069899785414676785e+00,5.720232385386712970e+00,5.392964007325415032e+00,4.158870978293465193e+00,5.953350079983978915e+00,3.232746935335669036e+00,5.658934085315204143e+00 --3.347081575849270241e+00,-1.875566408155557863e-01,1.373805813135958065e+00,-1.175124539027547321e+00,-3.084582261338173481e-01,2.304849343261525796e+00,5.650071425020970040e+00,4.727432440567961081e+00,1.757391563622786812e+00,5.363571028525146112e+00,2.433652791019824235e+00,5.946684745186741949e+00 --2.549657823098020692e+00,-1.580664376558950313e+00,-2.049692467577199473e+00,1.306927984545027899e+00,4.255303096775275296e-01,-2.131719678615784375e-01,6.492867362025509514e+00,1.882685262636422330e+00,4.711271395093374892e-01,5.254387646473789530e+00,4.361070860586611531e+00,6.288490364663671173e+00 
--4.155153224244512167e+00,-1.638103590486665206e+00,-6.264898634428782920e+00,1.951636392749369175e+00,-1.053489723650152143e+00,3.729657763428335659e-01,6.853313246252220381e+00,3.201130691610447609e+00,2.033728470151814793e+00,4.500078973468650823e+00,4.296945244227881489e+00,2.029974511760805456e+00 --4.141704456103339815e+00,-1.009012869991860128e+00,-5.401447994645364403e+00,-2.514643434433179170e-01,-2.706026508554485677e+00,3.268482030258669235e-01,6.328235046023614530e+00,4.065915530392780752e+00,5.646001626406993879e+00,4.280884981122723865e+00,3.923160234900610277e+00,-2.746902120133274394e-01 --4.718113904319100271e+00,-1.802684091603253602e+00,-1.506657146317793128e+00,-1.004083775292140324e+00,-2.609366810774318424e+00,7.097452710287838196e-01,5.863057920820553548e+00,3.192879991044643706e+00,4.833576665416503104e+00,4.142242653072422343e+00,3.255009115094919636e+00,4.959172727951663973e+00 --5.049019775254446785e+00,-1.381563077028357345e+00,-3.607815631934928047e-01,1.537058909742779811e+00,-2.307742759492474427e+00,-1.690775288074841898e+00,5.308433259777813440e+00,3.269680422232153827e+00,1.538660360555280437e+00,1.342948121733243294e+00,5.136919446565984870e+00,5.150108154036455943e+00 --5.433552651702445502e+00,2.818885562711461595e-02,-3.279417244823288780e+00,1.545791434687723953e+00,-1.477440056825851222e+00,8.359621473279092996e-01,5.133107815890593173e+00,2.991393703367608570e+00,-1.508745328376193129e-03,2.571244256838248088e+00,4.478464671550294263e+00,3.667520676780307909e+00 --4.246275329602492654e+00,-6.130663520806219902e-01,-5.845062440614009702e+00,-1.952098650681894920e+00,-1.746264638512076361e+00,4.958869789032507303e-01,5.315079959506445739e+00,3.972743925412632215e+00,2.793269678504993969e+00,5.584940290483441316e+00,3.892535432986085731e+00,4.697240467623889693e+00 
--2.706499974809537967e+00,3.924922185885181269e-01,-4.105156923265348645e+00,-2.044592367564284530e+00,-3.178980144414182973e+00,-1.493057516814380037e+00,5.021757939751064548e+00,6.339447365323519712e+00,4.945394441279042930e+00,5.264834653484440352e+00,3.886343321092996739e+00,4.424725523759203583e+00 --2.651467077068968337e+00,2.492099512157600394e+00,-7.841257565010855135e-01,-1.377536061630622699e+00,-3.839461700572829983e+00,2.582115106873133392e+00,5.587764431662366249e+00,5.405066853022743700e+00,3.860237202046897576e+00,4.107108869533949047e+00,5.487652977036795221e+00,4.996723475156893635e+00 --1.921174944783671457e+00,9.412006395738453657e-01,-2.625864606616614139e-01,-1.331777564711734918e+00,-3.478711161053920531e+00,3.905701109020744077e+00,5.133713232906468527e+00,4.494368763835435487e+00,1.307926951608490818e+00,3.830865832317300601e+00,5.178034455680792902e+00,4.871516117668107881e+00 -2.481098347911347979e-02,4.798757746073647468e-01,-4.210669215598828785e+00,1.519657317589790058e+00,-2.204219357018443404e+00,6.409508337405409595e-01,5.855966104246856929e+00,4.718316150574986878e+00,-1.208460825345432532e+00,3.876182703520312245e+00,5.371607561800637143e+00,4.279769579332299401e+00 -5.608947015146614756e-01,3.307660883373833016e+00,-5.302269633643058810e+00,1.154513370240555892e+00,-2.508472975056526444e+00,-9.069255698958078593e-01,7.453500872994089477e+00,6.136685070718774782e+00,1.577779596919128835e-01,5.811348619317231012e+00,4.013359064508622787e+00,4.274173284761452862e+00 -6.918019772089871733e-01,1.466478696550987060e+00,-3.621146700997774914e+00,-1.135210265489079218e+00,-2.270093923835809324e+00,-2.154054312539678939e+00,8.548631459854339809e+00,8.580677927587142761e+00,3.573725713298052664e+00,7.419094220950942287e+00,2.303293360327956396e+00,2.738280109656968442e+00 
-5.933258203294379252e-01,-1.131123560438463294e+00,-1.605861339065546733e+00,7.302998686819985874e-01,-2.052409032866207905e+00,8.576733138557379732e-01,8.266456399981672476e+00,6.358139262462048080e+00,2.805261999954582031e+00,6.240747420223196507e+00,3.704750869121015366e+00,4.521147653224271146e+00 -1.445654395659279334e+00,2.089564369019220602e-01,-6.108205565391249259e-01,1.528236749888444379e+00,-2.038589923301223017e+00,3.072549047348906104e+00,6.531145968474424635e+00,3.659707778937812517e+00,1.361490758831767067e-01,4.413772251719586315e+00,3.702561277299750042e+00,5.074465917376252300e+00 -2.763119467305571675e+00,1.169930847882693303e+00,-3.410135203575566987e+00,1.227364972010502875e+00,-3.034243912264597220e+00,-2.259853471281571657e+00,6.228711353324530187e+00,5.390345214270871210e+00,-7.765975881852327234e-02,3.032668654013583875e+00,3.620297372992901774e+00,3.283416564361951284e+00 -1.617596157559101577e+00,9.274198691471613465e-01,-4.341813534124731255e+00,1.443200242866820027e+00,-1.215922849984176546e+00,-3.882836332642096711e+00,5.969829029753539906e+00,7.359973068273208519e+00,2.975172798878382707e+00,3.885288949269857905e+00,3.261339717609219591e+00,4.941919813889841251e+00 --6.576508653258905657e-01,1.632997792991522701e+00,-4.102480222204741622e+00,-2.327795448610795681e+00,-1.266255394890709374e+00,-1.779812888348156319e+00,3.960817346595896460e+00,7.353020846842392544e+00,4.432314530601575875e+00,6.348169316565152265e+00,2.502149813883460361e+00,6.128471791598121499e+00 --1.013240803735802720e+00,-8.014628681214885919e-01,3.914986840899608023e-01,-4.182871213923220921e+00,-2.436187298675244417e+00,9.875968433588133699e-02,4.923257732371997264e+00,5.474074476059730188e+00,2.808478061409179993e+00,6.312024025740812228e+00,2.064002107289165000e+00,4.387602522912947833e+00 
-6.197151744087525671e-03,-3.149234092173039290e+00,-1.654102488856168396e-01,-3.412852847139776769e-01,-1.374484405133386211e+00,3.008339685703273148e+00,7.066411960496671973e+00,5.893878504480789537e+00,-6.355295163345058285e-01,4.776525929809087856e+00,1.379400472880342399e+00,2.425387915421347351e+00 --3.904493631610488347e-01,-1.264212846324544781e+00,-2.225130675970304672e+00,3.537047195554293033e+00,-1.110739285306775681e+00,1.763013865152736503e+00,7.669229880295137747e+00,7.033887082352526576e+00,-9.855532089657677020e-01,2.673791490967353468e+00,2.979947600959134135e-01,4.784590574609719482e+00 -3.385595454556619144e-01,8.543168756601032809e-01,-3.998827467911055589e+00,2.792529087814373145e+00,-8.980532521212727382e-01,-3.434307464143248279e+00,6.673272479165506255e+00,3.658839214659448835e+00,2.051941196236152010e+00,1.813070366847800763e+00,1.733569923426339532e+00,6.495342894923719967e+00 -5.457907858885177044e-01,1.406319903308074881e+00,-4.003999994455309519e-01,7.598421744986119108e-01,-2.194432074523071652e-01,-1.521827718638907589e-01,6.771372674723006746e+00,3.146117206284628320e+00,5.052401051425629674e+00,3.103934056600683000e+00,2.788192216146352820e+00,5.148535415200887755e+00 -4.558412795895108172e-01,1.104444297834388106e-01,3.593332594690472170e+00,-4.491949968717320330e-01,-1.249186860862924187e+00,4.848963876141733631e-01,7.417506132955815623e+00,5.706804425219697663e+00,1.215304517010837859e+00,4.780543049014910117e+00,1.268904798642988663e+00,4.344918043171323241e+00 -1.029599828608850798e+00,-3.071971053391178708e-01,3.348362969849801640e+00,-3.252650131375099285e+00,-7.798963774535210280e-01,7.979538575667203260e-01,4.762983994516790887e+00,5.907165620073715040e+00,-2.723269413874936262e+00,4.328361348759194094e+00,2.685950096418174837e-01,3.347897102570498884e+00 
-1.859740361719270263e+00,7.446068548254569652e-01,-5.897576147674848945e-01,-2.397830480271298192e+00,-2.495209820399826750e+00,4.455774218736996417e-03,2.135666466920343343e+00,5.503827401310089407e+00,-2.574279522444244961e+00,4.861401535483356184e+00,1.201085947131324438e+00,2.655449252937117421e+00 -7.374741802790536482e-01,2.322398825206866135e-01,-6.388607919241395816e-01,1.558201954142270695e+00,-1.169242667594004637e+00,7.156079652442062233e-01,1.209797746695326470e+00,3.280608636259185751e+00,2.433976210884714053e+00,6.860566217677655843e+00,3.428522783072619262e+00,4.354001919376115026e+00 -1.950582162144144371e-01,1.650484120201193838e-01,9.187050848428088834e-01,2.397232901981297459e+00,8.846211368723222446e-02,1.915130839190936074e-01,2.395860524137094050e+00,4.892194719829085159e+00,5.158820345845165356e+00,6.636110962149698977e+00,3.735949827975017179e+00,6.185136809968994065e+00 --2.166162878878472142e+00,3.580732411560659245e-01,3.184832216671074079e+00,1.162138682750387453e+00,3.518870334769913510e-02,-1.993834323159580979e+00,3.195440828486001195e+00,6.371795030099142387e+00,3.311349987162779396e+00,5.273050129739965008e+00,3.450213330997203176e+00,5.173169406007716020e+00 --3.083572858198748712e+00,2.049828050997768880e+00,1.369940080228404966e+00,-1.968064312283657191e-02,-1.465151629194636929e+00,4.167797826275298489e-01,3.621458553235387345e+00,5.113646283697613981e+00,-2.572042403930124621e+00,3.535381998201279075e+00,4.275033910035618945e+00,4.819187497449219038e+00 --3.759345714741513689e+00,3.866920217978569774e+00,-2.103677830170176843e+00,-4.413827482702351035e-02,-1.884494713061559690e+00,-1.664631160098111740e-01,3.824917394299513163e+00,6.739145715447541818e+00,-1.224305480377220867e+00,2.002436272864535738e+00,4.451854545236884952e+00,5.366281616039342950e+00 
--4.578524685542856076e+00,1.778297781524767585e+00,-2.342573708673763022e+00,-1.198012191420462447e+00,-1.087814814573651923e+00,-2.086338604224302173e+00,4.904593866914863654e+00,6.667415099684595781e+00,3.719124336327231717e+00,5.253414586500423589e+00,3.973608202969440839e+00,3.507768606291574276e+00 --2.825684221751323832e+00,1.017035095562613956e+00,4.304519462972637434e-01,-1.308161149994459205e+00,1.808840797870643780e+00,-7.167306005669666913e-01,5.610262983927929881e+00,4.565759051248619471e+00,6.723464872045028073e+00,6.383075616093323745e+00,4.233965708217839463e+00,3.808504549699757824e+00 --1.019503719482324655e+00,4.065875506696979613e+00,1.787319612937320779e+00,3.785871797822473184e-01,-4.859598417414792149e-01,1.860761205100296634e+00,4.634366340916597160e+00,5.953031621103274773e+00,2.001161800363862930e+00,3.602337542990413866e+00,3.525568024920475274e+00,4.793193174110071730e+00 -1.786485900516896486e-01,3.602429925837668634e+00,-3.578127110716021464e-02,1.737775079834363190e+00,-2.223910956932633187e+00,-6.312674771699231524e-01,4.791120446589096638e+00,6.647638521125486477e+00,-3.934713074554867518e+00,3.509959809463994773e+00,2.288498110193317281e+00,3.623256074928376869e+00 -1.377030533515761768e+00,1.868948230854070580e-01,-2.055257803764826807e+00,3.105518687464257521e+00,-2.031096529580536103e-01,-1.283138892990953739e-01,4.735750670689223618e+00,6.241461733830655056e+00,-2.176226623923147585e+00,3.986486597268589449e+00,2.201362078044586390e+00,5.134844960398845970e+00 -8.802257136493027989e-01,6.991610167673090181e-01,-9.424059896640493150e-01,2.691141223314705844e+00,5.549848900595336643e-01,9.033152868585698769e-01,6.019697038791218979e+00,6.081796153028275143e+00,3.477088450765858596e+00,4.767849061984209769e+00,2.942893996062047268e+00,6.421923977825514385e+00 
--1.381855267987889269e-01,3.358675920631862155e+00,-1.269793476586484893e-01,-1.901853822405959127e+00,-6.386590191962148166e-01,9.989271146097631338e-01,7.635877472799798227e+00,3.757307194184470234e+00,4.359092498585787290e+00,4.700958837177121374e+00,3.883273996083135060e+00,3.670302715800173754e+00 --1.242603731882978346e+00,1.179047161150432732e+00,-6.655607401898910869e-03,-3.935150388695540347e+00,2.071500264828483040e-01,1.416157323149925595e+00,6.199934806643138785e+00,3.645732419870664209e+00,2.646933395806208011e-01,3.664391908397746533e+00,4.501513758231745932e+00,2.208611795199703742e+00 --1.980575304377616641e+00,-1.023312785625449362e+00,-1.076612247841705283e+00,-1.100154803106146773e+00,3.105740538097447878e-01,-1.219956835939274731e+00,5.805317603722761177e+00,4.728494949076917919e+00,-3.770431797980792865e+00,4.421206940584859524e+00,2.599085846756127083e+00,5.651612737237668327e+00 --1.516710654879326103e+00,-2.323655834268083531e-01,-1.979048757027759287e+00,1.308402345296668212e+00,4.294516415566843603e-01,-2.959675111221925503e-01,7.029773949233700137e+00,3.801580229547635970e+00,-1.745096512269221023e+00,5.483005158758310849e+00,3.949109605587119098e+00,7.534620038935655018e+00 --1.008203449196294788e+00,1.018038906638927266e-01,-1.753742555218650745e+00,1.038464361603437469e+00,-6.402963602107260010e-01,-6.128757175426360337e-01,6.356158567751506894e+00,1.796323243505194789e+00,2.363618419278969540e+00,5.121392245046798131e+00,3.177370768889981267e+00,4.039345223032076326e+00 --1.895911417887634620e+00,-1.353493576124875197e+00,-4.044122617485268467e-01,-1.680421006422703423e+00,-2.941734439100744392e-01,-1.290065374186450287e+00,6.494096475458992934e+00,3.748179576591863071e+00,4.345436684232513613e+00,6.504187099950051731e+00,4.161836780587097273e+00,3.639340715987385177e+00 
--1.010428440734556510e+00,-2.913311355246178458e+00,2.987635913468410231e-02,-3.279635424953385225e+00,-5.459629222529611692e-01,1.379276987703866642e+00,6.519992428468716739e+00,3.821967605340175211e+00,-1.366874441741661173e+00,5.596444265884471747e+00,3.368739435092725110e+00,4.269651927955164794e+00 --3.836429733507790152e-02,8.992082546887900651e-01,1.676376391647645958e-01,7.869692148824878331e-01,1.483872795671443257e+00,-1.291049843292832566e+00,6.845329173990874594e+00,1.840953010689554414e+00,-4.601262519633495529e+00,2.963292356885109502e+00,4.284365403802065764e+00,3.582885009051824721e+00 -1.133727084981246636e+00,1.667861749572196128e+00,-1.002822474434479805e+00,4.399034951128378168e+00,7.289220369490916340e-01,1.313710382185457659e+00,7.786877049577780596e+00,2.650399780779336911e+00,-2.175503178058279197e+00,4.858432397219559107e+00,3.588870920164624501e+00,3.967739611535218369e+00 -3.028928456183230367e+00,-2.343504079657432904e+00,-1.735220721547379163e+00,1.502014569001267175e+00,-2.000598529814214821e+00,3.063481019302902730e+00,8.711264538275468539e+00,3.939224384803575507e+00,2.306019998494043044e+00,3.869607166962059530e+00,4.493807489500436070e+00,5.438974973144797609e+00 -1.912884418398078434e+00,-1.017257808087804527e+00,-1.563967274598344614e+00,-5.979828118169634443e-01,-1.818763994069724443e+00,-8.957388571460802584e-01,7.842133100651661692e+00,4.706804462653856547e+00,2.062018851046009793e+00,2.013173749054140593e+00,3.174823864850833566e+00,5.055864408312224612e+00 -1.936123025836812506e+00,-2.881807603031398823e-01,-6.322762488805152570e-01,-2.405318421708155241e-01,-1.117212170935864696e-01,-2.089224422518605184e+00,6.859814222292866859e+00,4.305888056446495860e+00,-1.504652401585079957e+00,2.724891608993822878e+00,1.953893529791669970e+00,5.706103435960124592e+00 
-1.618221552723499768e+00,-8.002646754975277776e-01,-1.541544171952438758e+00,-5.183997404836990786e-01,-1.770290809661849929e+00,-7.887086637616680385e-01,5.784084225940442359e+00,3.699792675914003404e+00,-2.693492530826771869e+00,3.368124005382250985e+00,3.150094770688928847e+00,3.256005576777363686e+00 -2.080028156160730113e+00,1.305113373605160998e+00,-1.579242564349136346e+00,-1.031759963327423035e+00,-2.296390069112169030e+00,1.113842869659726453e+00,4.258537816670777687e+00,2.666290978369139530e+00,6.042940022593139560e-01,3.276888245125996413e+00,4.134579687760838063e+00,3.667555485766595069e+00 -2.067576750771584759e+00,1.650518389633325311e+00,-4.526871086243589026e-02,-5.336818663506124327e-01,-4.757304941173887691e-01,-2.213097572202314289e+00,4.381036304112623014e+00,3.610682949304262923e+00,3.614310001038992581e+00,3.483275795388367513e+00,3.583129797372695435e+00,6.389563758440964136e+00 -3.786017738921322096e+00,-4.475021169250421038e-01,1.094641211652286561e+00,1.816604243070493174e+00,-4.137960824756629630e-01,-6.128853266638312203e-01,4.872969010800280110e+00,4.909625339754276396e+00,2.644042578184392145e+00,4.099073312922418388e+00,3.757909776778407540e+00,6.151163093415396688e+00 -3.483896602799633424e+00,-1.711015863873070142e+00,-5.104956170019500084e-01,2.859627247942449380e+00,6.568479639301256778e-01,2.204250097643947193e-01,3.773568767145410341e+00,5.060001513972095744e+00,-4.371107711584038213e+00,4.335962442428765939e+00,2.918503197900284896e+00,5.090574584299498717e+00 -4.576394942912266117e+00,9.426496451287432743e-01,-3.019032842906256509e+00,1.798851426301030454e+00,1.871164952159827033e+00,2.498610384954699504e-01,4.798982163132473389e+00,5.230029489935797749e+00,-5.885922879260419904e+00,5.282686299998096580e+00,3.374482995803665730e+00,4.608386496870978455e+00 
-4.163200436344271615e+00,2.165441207107301036e+00,-2.376162131368675823e+00,-1.205931204947916502e-01,1.637935602027619009e+00,2.460981688298094383e+00,3.263146420542350601e+00,4.191488902458624821e+00,-1.815598761535365924e+00,6.212258418129863458e+00,3.817028694278706613e+00,3.317411050370447256e+00 -3.928017036429528375e+00,1.333673631104955071e+00,-6.458178088186522503e-02,-2.614809517409372397e+00,5.404482446485152636e-01,9.982797489639411159e-01,1.248171468675097984e+00,4.857115197313684796e+00,2.164447559332587367e+00,6.784390463654808912e+00,3.026009329226857947e+00,4.269082725882513252e+00 -3.342461135564160024e+00,1.813969413192768165e+00,1.271570493954519598e+00,-3.241552141702179313e+00,-4.107566534691116100e-01,4.554059273203924230e-01,1.474089932010937964e+00,5.396862418488866275e+00,-1.335624732277507221e-01,2.423500390873731813e+00,2.723503842105398753e+00,5.101459630326847794e+00 -1.799652716973078714e+00,6.944756218125078817e-01,-4.194005994748716826e-01,-1.083790345331692162e-02,6.616246977677542329e-03,-1.257618932044302751e+00,3.330856688646777641e+00,4.323617273943575867e+00,-3.618646261984713064e+00,1.108704578913731531e+00,2.418932105367774810e+00,5.699769803981489247e+00 -1.672723233610053040e+00,-4.900712705065462388e-02,-2.612987182637256378e+00,3.631112288887047956e+00,-1.299422808443320698e-01,-2.561113511042099944e+00,5.022410746851131869e+00,5.238189687817999207e+00,-5.493461308644601360e+00,5.161521639309377463e+00,1.877491626970969385e+00,4.035544796621502783e+00 -1.611958406255827869e+00,-3.091732562761744241e-01,-2.765880768238859311e+00,2.818067235798883630e+00,-2.986442384781844983e-01,-9.983095628402856025e-01,7.515330779140204420e+00,6.175506103179614570e+00,-1.835135034308897239e+00,4.823193971936775348e+00,2.719501996436998414e+00,4.329517824313737329e+00 
--3.032516886443232806e-01,-5.896004093191058448e-01,2.372919865267560935e-01,-5.532374295282149745e-01,-3.508148009550305368e-01,9.537541883800373377e-01,7.701903546473512385e+00,6.350400019395164186e+00,9.933807002980135081e-01,3.613308445084824072e+00,3.691216874039356277e+00,4.982059011101596546e+00 --1.337959007393631605e+00,-1.002276626933851444e+00,9.579227200033106904e-01,-1.678868230406534767e+00,1.037460164619667724e+00,1.764842439121176332e+00,7.461032443895633470e+00,5.762777110943723358e+00,5.819435959838310524e-01,2.926436880405554941e+00,3.826542669273640396e+00,4.704638029968001511e+00 --1.233022460530211362e+00,-5.743711226531944547e-02,-1.380807697986605742e+00,-3.842982498171382044e+00,-3.134900402257079222e-01,-1.635880325408218816e+00,6.701998499399708820e+00,6.226440851859729619e+00,-2.691285694757134195e+00,5.804829449411999853e+00,1.525138823745269434e+00,3.493815860942671492e+00 --1.865205496337602575e+00,1.431710762843490192e+00,-2.718995054907896503e+00,-4.593443294069448513e+00,-1.383122156873124453e+00,1.309052096583116498e-01,2.337293587335182110e+00,6.260320734918286334e+00,-2.818298318179963680e+00,5.260742496647727862e+00,3.769612237166617419e-01,5.288363853086943678e+00 --2.206628938695722741e+00,1.551514493449069221e+00,-2.413132556106665660e+00,1.178316695476375520e+00,1.457335410974212042e-01,1.204115957202990028e+00,4.698235367745073887e-02,4.896590014458261031e+00,-1.378430015980649515e+00,3.170740511370717662e+00,2.494619471374079023e+00,6.962789005562388667e+00 --1.860135976104381861e+00,1.798906615382426466e+00,1.434350091326882515e+00,5.291584491944156809e+00,6.768586675674830300e-01,2.077446917403265036e-01,2.813578563378360542e-01,4.655291061699078803e+00,-8.382086229604484018e-01,3.621502355422674846e+00,2.593844517273805383e+00,1.607291002811817915e+00 
--3.124042680743388534e-01,7.577307867890256121e-01,2.121353399933312289e+00,1.350895633914931615e+00,-2.789513697782860513e-01,6.753067582859945128e-02,5.177064883997042344e-01,4.187754565173992916e+00,-5.103936229277223902e-01,4.061669028788871927e+00,4.315741458371293859e-01,2.824313347012796793e+00 -1.130011797480357849e+00,-8.391072593324180318e-01,-4.237611371298914964e-01,-1.933216574495730145e+00,2.742387868777406146e-01,-2.994752019428656986e-01,2.576689234901710979e+00,4.197275424097892227e+00,-2.993473679205940208e+00,4.366762917006225209e+00,4.797295118434252714e-01,5.492514421382309386e+00 -6.013461290273438564e-01,-7.688129699543494455e-01,-4.897451212908876528e+00,-2.480162142973378891e+00,2.630291790748110259e+00,-6.105555302220355696e-02,2.831441988274062460e+00,4.603751886326595155e+00,-4.760866155975501002e+00,4.405816125861661270e+00,3.255181258612026785e+00,4.946983860710324699e+00 -1.826389000794743867e+00,1.360554260325545606e+00,-2.665441211971079749e+00,-2.956296864889385922e+00,2.293554557599212451e+00,-3.452188178759938442e-01,4.107295149898662956e+00,3.829840697084754275e+00,-5.231379965522780395e+00,3.965474815393267960e+00,4.403908204209739274e+00,3.258035887312124679e+00 -1.394651957441011003e+00,2.188037568200167815e+00,3.918724289346349199e-01,1.396245913849419318e-01,1.029139151119064621e+00,3.966988293802046206e-01,4.213206276829068564e+00,2.317514670608773919e+00,-2.288681983437578005e+00,4.481009799782624370e+00,3.195088291628811916e+00,6.864843505746224928e+00 -1.317933650233626075e+00,6.196489704735803627e-01,5.453711560110505729e-01,2.872940919956571637e+00,-2.861319974669730426e-01,-4.446493412264039824e-01,5.341245468866937074e+00,2.402731574217447097e+00,-5.277416786616644373e-01,4.779774807980386342e+00,4.143791484593146279e+00,5.334646907298597363e+00 
-2.078369651602372858e+00,1.055758634535370888e+00,-3.462383431273309942e+00,1.678371156089661920e+00,1.137865298635378331e+00,-6.250099977772416437e-01,7.340207577136370176e+00,3.994784883241460527e+00,-1.744308929395891106e+00,3.878048144569627986e+00,5.605253434021880032e+00,2.521198941506532343e+00 -3.149956991405946649e-01,3.999108922925003196e-01,-3.508493449203123227e+00,-1.183807484020850787e+00,2.048243675682622555e+00,-1.041283196264988886e+00,8.555754908569184636e+00,5.741224396017981846e+00,-4.554342668069113031e+00,6.039359007999831164e+00,7.062085973272534289e+00,4.566203609589495116e+00 --1.335484915511099269e+00,-9.496044867575319515e-01,-2.101464349861160485e+00,-6.019396382394651690e-01,2.167658385589843029e+00,1.143199269311364397e+00,8.813808214112786033e+00,5.019559988327137567e+00,-6.275070551017202902e+00,6.788630174725600597e+00,5.175258844591393625e+00,3.385675944182517050e+00 --1.762311053362322522e+00,1.495196654900621702e+00,-6.786751093886456143e-01,1.226921069235455075e+00,1.144931843234056057e+00,2.092742335952448407e+00,7.687264647442435539e+00,2.181909398040426940e+00,-3.808404812229319703e+00,3.953596456987713559e+00,6.473631774814190720e+00,5.310460369457939755e+00 --1.242235850301086186e+00,4.067472821829558605e-01,-1.266287029602956249e+00,-4.473018400253332416e-01,1.520755709265585232e+00,-9.731500771403120975e-01,5.208368016111155185e+00,3.098806325055673216e+00,-3.095765099554668609e+00,4.673366002556873511e+00,7.936264835799946127e+00,6.124787052766116346e+00 --1.047907593483522470e+00,-2.688763226369760506e+00,-3.113089820761620885e+00,-2.512693119715514456e-01,1.846380876804381899e+00,-2.282894962031603470e+00,5.906650769498868137e+00,7.191220081857573554e+00,-2.519878654915277849e+00,6.553610623994441831e+00,8.380986906793040703e+00,3.584350310865875411e+00 
--1.844627464125474514e-01,1.122766995630273268e-01,-4.121025287637389845e+00,1.797875294197349039e+00,1.585009272754673182e+00,6.660819244520835847e-01,6.893875111118498822e+00,6.874602615116323534e+00,-4.767729824381188308e+00,6.979311980368485102e+00,6.773564231786926904e+00,6.240392836728278603e+00 --1.553126461549380721e+00,1.428128698225768156e+00,-3.387762219870576352e+00,-5.811216978785012088e-01,2.980713220670674346e-01,1.450360386619919950e-01,5.111534095669602706e+00,4.052598079611519744e+00,-4.344976218930746370e+00,5.539882879186871278e+00,5.024728618775607281e+00,4.627643543827098860e+00 --3.994786779790023612e+00,-3.030500860844555167e+00,-1.574365772133773156e+00,-2.334190637396901913e+00,1.041007653499790853e-01,5.435714176373549478e-01,5.284802151532128178e+00,5.099647470951653716e+00,-3.371047890540605607e+00,2.211015009308235690e+00,6.168632169582965119e+00,4.081638605238643080e+00 --4.951401337488436916e+00,-4.281597413464874435e+00,-1.446965890712373914e+00,-1.532118915967320305e+00,9.840892849850388924e-01,1.695384535337644216e+00,5.045181307419701611e+00,6.578181144299732885e+00,-1.598630209145174952e+00,2.396734116344834131e+00,8.386117100329082419e+00,6.105214003038710757e+00 --3.768335176734624703e+00,2.821499338348825159e-01,-3.841967183343977688e+00,1.219078443502037201e+00,3.382258767711604719e+00,-3.293032198175086522e-01,5.688952638614140689e+00,5.508928573275909457e+00,-3.851911527026878446e+00,4.279345597822666214e+00,7.042749744856436678e+00,2.811021302569785441e+00 --2.000378725346092956e+00,1.721205612915869709e+00,-4.147184173872242141e+00,1.613977079281302185e+00,1.245829365421335932e+00,-3.110864252931751506e-01,6.006974816112244930e+00,5.176304910092258460e+00,-5.261539750925154024e+00,4.346664796174861678e+00,5.904321977862432291e+00,4.917188698982512207e+00 
--1.529114688558853796e+00,-5.520735524876594358e-01,-2.656566963244263668e+00,-6.190376217729831598e-02,8.875057108964318209e-01,-1.866651543240889777e-01,4.284550674581560692e+00,5.405435297673074402e+00,-4.775343473267573913e+00,3.799120905352531885e+00,7.318339719949468858e+00,4.545836912879165581e+00 --1.382077426139134202e+00,-1.460745776425037112e+00,-2.296335121286493530e-01,2.238441972820929271e-01,1.636529721440337148e+00,-6.973505035377318695e-01,4.318230763979286735e+00,4.610885275325963484e+00,-4.158230463276117561e+00,2.544854833773063874e+00,7.518444240314048344e+00,2.399475115996510866e+00 --9.776006139457309452e-01,-1.267066756597080346e+00,7.938082447649490447e-01,-1.413220720731109825e+00,2.171566498567953474e+00,-2.290059544218771670e+00,5.087184242756512376e+00,4.886844020442945968e+00,-2.652016073078574010e+00,3.729252890668482756e+00,4.806585388871521758e+00,5.511388409605797456e+00 --1.302065558268684642e+00,-4.226338953756407224e-01,-1.123615705427958300e+00,-9.783303329218313582e-01,1.007122282374927735e+00,2.021121699101357194e+00,5.785845498026464639e+00,7.340906335593323284e+00,-3.094773528188477840e+00,8.037933946174717903e+00,4.708212886471146774e+00,5.125970724634678177e+00 --1.605904370796717107e+00,-1.232593778691534192e+00,-4.337633722520342516e+00,2.543933438651938550e-01,2.444625552455861772e+00,1.588729289098186337e+00,3.946041794826312454e+00,6.208758917985891301e+00,-4.361938518167196577e+00,5.252633179210798531e+00,4.261297571469822465e+00,3.252001104075056492e+00 --1.367407857803577098e+00,4.737473641267300195e-01,-2.935007695235575476e+00,-1.540156451526306203e+00,3.867695856989417535e+00,-2.178328585195230538e+00,4.309242335980648875e+00,2.485156214386875728e+00,-4.796033594615050077e+00,3.794170355583785792e+00,5.729969752339677314e+00,4.005477881434223342e+00 
--2.255653640435242302e+00,2.505874133578036123e+00,8.426518445301720006e-02,-1.024343935297204577e+00,2.751911341281242152e+00,2.095966177392705743e-01,4.137847859460531019e+00,3.843512617322950575e+00,-3.612947540317435369e+00,3.221189136472135672e+00,5.671551942901556842e+00,3.375065468211977837e+00 --3.921030993293422640e+00,1.630507909858668614e-01,9.695086723449181099e-01,1.016718128066200100e+00,9.242379484877117912e-01,7.640466204237239545e-02,3.887114122371945868e+00,7.938872834349187890e+00,-3.930522720000166004e+00,2.642365263037671497e-01,5.839611272192583691e+00,2.237288727564263002e+00 --1.936331010084744397e+00,-1.525296869443792058e+00,-1.887927094574152198e+00,2.889296467732928253e+00,6.674888846926975150e-01,-2.226209497065793741e+00,5.147626517807525559e+00,8.101203583561597199e+00,-4.036529054131140271e+00,1.460043503492038042e+00,4.555431949381168799e+00,5.518934066493400792e+00 -1.089177159792090732e-01,-1.480781935838115793e+00,-2.662620779159473372e+00,1.034614556451587841e+00,1.577832789279951964e+00,-8.318449178559828017e-01,4.720014009671788635e+00,6.953736907057773919e+00,-4.647597512732412284e+00,5.850995166966205474e+00,3.801198928568325641e+00,3.967097800413919018e+00 -8.857813357540065269e-01,1.626430617356564579e+00,-1.705199505776182178e+00,-3.030289740963192813e+00,2.396803525729378048e+00,1.417714739066025853e+00,5.888658474261237785e+00,5.147574616784825530e+00,-4.378121367908962114e+00,6.743064673446346191e+00,5.278554693669795128e+00,2.382535524532037652e+00 -1.379769462219410325e+00,2.641558084995923039e+00,1.825251076082317070e+00,-7.221922592021288168e-01,2.285832806744357182e+00,2.454150858434434923e+00,8.429248908043021515e+00,5.832642052619546824e+00,-4.493401982967586861e+00,7.640750679033470050e+00,7.192202538343089202e+00,3.930856752560546674e+00 
-2.535850865204160254e+00,1.747903449949555599e+00,2.352021208472721003e+00,3.861460703360300784e+00,1.219586203910400179e+00,2.687019828144873168e-01,7.327886179081453122e+00,5.419198983552933058e+00,-3.686637380756415894e+00,7.427129093163848950e+00,7.238967956017155814e+00,5.079134620182847115e+00 -1.918686482430989937e+00,1.901521804749370936e-01,-2.233122763915311726e-01,3.527070754360397853e+00,4.868582213573212636e-01,-1.998178446545642029e+00,6.021843205344657335e+00,4.060183028189632282e+00,-1.173286360505835901e+00,3.527551104234932566e+00,5.874570741275926267e+00,8.095694785062841348e+00 -9.834708323740255498e-01,-1.767532034523959350e+00,-2.275029374245872038e+00,-6.330186885629787152e-01,4.817738872638805958e-01,-1.738891492324635024e+00,4.938549126197735006e+00,4.321312469822547797e+00,-2.291676683720838881e+00,3.359429581762446659e+00,6.659601894279155054e+00,4.369361129868212679e+00 -1.130922016234996885e+00,3.501437177607986673e-01,-2.197358340644369745e-01,-1.733438673028740329e+00,-4.798647781829540326e-01,-9.449763468903299435e-02,4.346695488232399818e+00,3.764062693634334433e+00,-4.867940086837688085e+00,3.469323980260952567e+00,8.250621485366245622e+00,3.199225863983878870e+00 -1.987774937541078035e-01,1.621324503807580308e+00,1.471824066182199298e+00,-1.574909450028733060e+00,-1.444874973655196015e+00,1.828513539814531796e+00,4.486520860130791988e+00,5.335209395860639248e+00,-6.103478888992682982e+00,5.295324495597192538e+00,5.824635096502237985e+00,5.422301366413537771e+00 -1.181252098985606347e+00,4.134968344910459059e+00,-3.613664434624896948e-02,-1.855481006435167224e+00,-1.404220683081923049e+00,-6.819517137232335990e-01,5.418137778231014501e+00,7.113217943178534064e+00,-3.589890359574852141e+00,8.317154553906529912e+00,5.537201981927316830e+00,3.586261854092550827e+00 
-1.856101904956906079e+00,3.925455888329150156e+00,-1.552381011240769881e+00,1.217705537328990228e+00,-5.337796791751940884e-01,-2.701922740527604727e+00,5.226323506836971333e+00,5.226959111586891638e+00,-4.693813412962244414e-01,6.616116507490186649e+00,4.891452878561917572e+00,6.033164740847288599e+00 -9.164264286591120579e-01,9.674231528967295324e-01,-2.080074393406135425e+00,2.693222771650090053e+00,-5.630122689619707632e-01,5.148341904779039080e-01,6.093970864738022186e+00,4.375989109329739613e+00,-2.262852418509285535e+00,3.253090895327358290e+00,5.143223292101271049e+00,4.424456203474296245e+00 -6.455961493516636507e-01,1.280927382243075563e+00,-1.905045654688749313e+00,1.415314383325836811e+00,-1.366699782267320584e+00,1.488353364690308878e+00,5.993014095948725029e+00,5.663699500937534381e+00,-5.375949927543590690e+00,4.127864241623477426e+00,5.749506903894103793e+00,1.749369021285917647e+00 -1.579146930776830771e-01,1.672634254975262635e+00,-1.481153727250524854e+00,1.130489333255395579e+00,-8.576220968497062502e-01,8.751866702420707522e-01,5.507422646264366506e+00,5.434873638195526269e+00,-5.476225810067839816e+00,4.600462707919646732e+00,4.738172189305659643e+00,6.056550584723551012e+00 --2.909607925415798046e-01,8.724720745357823493e-01,-2.368999111916265665e+00,-1.715903225118668995e+00,-2.148380708258881278e+00,1.183998800131415452e+00,5.349573112733391866e+00,4.165787047009397170e+00,-3.323558655156207742e+00,3.223375661647526425e+00,4.119277002685489464e+00,4.599970682298828883e+00 --3.306645713722670132e-01,1.909979759437071323e+00,-2.031465225505573180e+00,-3.587823934881999133e+00,-2.860600747815385425e+00,-1.987472501142117043e+00,6.256079522580288099e+00,5.830789907284640528e+00,-5.847171202702305948e-01,2.975383640935380214e+00,5.490481902136949977e+00,4.387879926982361845e+00 
-4.263228177411126918e-01,2.157561582795937127e+00,-3.316935045948691041e+00,-2.940545435776954264e+00,-1.864903705361835806e+00,-2.524928902417629573e+00,6.628072144779578068e+00,6.395597652102816610e+00,-2.696110597254691044e+00,5.952264941109109309e+00,4.257662994441342441e+00,3.703930638641808493e+00 -1.193366732033157351e+00,9.826325589930731130e-01,-2.516527984552007879e+00,-1.153359427414173410e+00,-1.706924925159805539e+00,9.752757465543320237e-01,6.304456315745787798e+00,5.489217295081757264e+00,-4.712329429705228279e+00,8.394840572117590938e+00,2.978300494010344579e+00,3.193680706741855779e+00 -3.602778714187880871e-01,1.836253113768347323e+00,-1.211580301569616491e+00,8.376741731740589003e-01,-2.925237604631134136e+00,3.386650484928460436e+00,5.494203647555167436e+00,4.661456842424625435e+00,-4.605837326283063859e+00,5.816965219020975297e+00,3.351541411691269179e+00,7.093379900479444444e+00 -7.232824621865587833e-01,2.360273753448378642e+00,-2.119738764212124593e+00,3.166679016710596706e+00,-1.933442555463390278e+00,7.646785509822175975e-01,5.160871861802657889e+00,3.943309158431016037e+00,-2.819566607777576728e+00,4.778156170584293427e+00,6.104584871203494245e+00,3.806052101117367137e+00 -4.895188127295139746e-01,1.598400661269894441e+00,-4.059440643799565152e+00,1.761520509834790360e+00,-6.930824119684195761e-01,-1.602894020209125081e+00,5.354020572274483136e+00,5.675753062682611372e+00,-2.074769113012789212e+00,4.534992326556606557e+00,6.374140926240183802e+00,2.995667537325232388e+00 --1.417146712806542030e+00,1.162540749823484543e+00,-2.821661381645870215e+00,-1.864701162965590076e+00,-2.148300520105935973e+00,-3.204283294137773574e-01,6.828004225253447856e+00,4.848636180667225126e+00,-4.807445855863754502e+00,4.322166784119967353e+00,3.357618936239717744e+00,7.477707704377309028e+00 
--1.223100535472626760e+00,2.399805049786160716e+00,-1.881675152841980392e+00,-2.051458864990816977e+00,-3.036353204927186411e+00,9.162349161972951350e-01,6.747155584887854829e+00,4.424131394752608415e+00,-5.172862866739347254e+00,3.714503085883650080e+00,4.530739860216478121e+00,5.588621204537695952e+00 --2.331620117532256842e+00,3.091363422104554726e+00,2.498570506946000691e-01,1.156413474997386004e+00,-2.338453850701846193e+00,8.381982775322296408e-01,5.346875066248663266e+00,6.416893309367025289e+00,-4.329012813825567463e+00,3.400418611958983206e+00,5.652205932632920593e+00,4.984056855741751058e+00 --2.663511555637593009e+00,2.035013611904984643e+00,2.174964038665827548e-01,2.002099711433645624e+00,-2.770177392028111640e+00,-3.737854810035729503e-01,4.668187886431102385e+00,4.793161363316138512e+00,-2.160875966280079830e+00,3.039640789342107574e+00,5.039753264893064078e+00,4.928799220242923163e+00 --1.080341765991820191e+00,1.594274693509639906e+00,-2.597492751680841927e+00,-5.159732943759433033e-01,-1.862695010933665074e+00,3.223788650687365864e-01,4.396700210807001241e+00,4.087088907698947615e+00,-2.788140969679981040e+00,2.753411555394968069e+00,5.138471736541990964e+00,4.463078872331661451e+00 -7.757674270661427673e-01,8.185867493298183417e-01,-4.025774562613177565e+00,-1.778381660850697932e+00,-3.344381828802922119e+00,2.261965511720036570e-01,4.697408133444811895e+00,4.072430739626219598e+00,-4.819173401705638682e+00,3.401845223177137001e+00,4.451556870688682999e+00,6.392728433143465949e+00 -7.611104818252110160e-01,-8.286425486551746156e-02,-8.714099597121953700e-01,-1.392322392271791331e+00,-2.010274344700349047e+00,-1.187859749674115228e+00,4.220067923014012123e+00,4.198593555867003779e+00,-6.339609891012742082e+00,4.219375112814423012e+00,4.609282730777004566e+00,3.743959361957657883e+00 
-6.790603894908597837e-02,3.587434096184727306e+00,1.621506077846058824e+00,2.664330740287865318e+00,-8.178962447624227305e-01,-2.098826921279402513e-01,3.445800902112551078e+00,4.990575469785755836e+00,-4.415085684552426670e+00,7.354874196964784616e+00,3.987913881285179318e+00,2.078377777453091824e+00 --2.114987211631109421e-01,5.188606737658716384e+00,-1.843969134213084282e-01,3.702957121339581548e+00,-2.006425116542370546e+00,2.737147109700642122e+00,1.169552537800597314e+00,4.655034887063085769e+00,-2.251466020449351291e+00,7.997480528203591454e+00,4.994447217168600517e+00,5.452196236741163382e+00 --2.044273108467230760e+00,5.835049444426541054e-01,-6.446928228212237322e+00,7.850216573166129974e-01,-1.431515617724336220e+00,-8.556040301668454973e-03,2.805575485367272748e+00,5.545649097145647133e+00,-2.639006556732359954e+00,4.314394547457116147e+00,6.591809101603595344e+00,5.870140067503667680e+00 --1.101726457346017263e-01,-1.620274964016493735e+00,-6.641148523763777511e+00,-1.094626423534233561e+00,-1.244275022996923497e+00,-2.347922786427476094e+00,2.986861694153729374e+00,6.013670007454654964e+00,-4.749572626239833895e+00,8.989575797021882586e-01,5.871049258764507783e+00,3.692157962858390707e+00 --1.355492099570395625e-01,-7.710785816490806077e-01,-7.460042509833899604e-01,-1.431969779711421920e+00,-6.658354460507526218e-01,-5.212556798169548644e-01,3.640715501686690558e+00,4.199800461076788949e+00,-6.923924389413910419e+00,2.409415786856320629e+00,4.723429037241177575e+00,2.118230114873473902e+00 -1.066566345600753474e+00,-7.877368489464271484e-01,2.450416683951246011e+00,-7.293212181132470162e-01,-1.831209305352096139e+00,2.128019456441309909e-01,4.630997173162287872e+00,3.153504864869932867e+00,-5.121705788364115719e+00,4.961928494411546708e+00,5.848549132384527560e+00,4.270064162359655136e+00 
-1.193425803725096213e+00,-1.896760202919263172e+00,-8.562759139084447080e-01,1.806142938563724432e+00,-1.942094921675173502e+00,1.854619225282396755e-01,3.595501587468281190e+00,4.159012662003547689e+00,-2.807186266658454166e+00,6.385983535919036669e+00,7.004357507056047538e+00,6.747016245236693521e+00 -2.083761271791886305e+00,-1.276720735151101849e+00,-5.479757520210990052e+00,2.606765689959579024e+00,-1.441777419428687645e+00,-1.139184854626122068e+00,3.426172540266504640e+00,5.032483969662179923e+00,-1.070486597603297341e+00,3.698943296325017283e+00,4.604898685711097173e+00,5.158872725905403023e+00 -3.913190663892308852e+00,-1.838204721789715279e+00,-4.340440974661143869e+00,8.709417064315090329e-01,-1.368377580730799625e+00,7.857745914260003683e-01,4.206649879981476658e+00,6.144795896611343622e+00,-2.978755867683346104e+00,3.100088655314563901e+00,3.913506554336225296e+00,3.186174085507160303e+00 -4.353600527808068499e+00,-3.456748851778082887e+00,-9.091876232287365855e-01,-1.666746966739525160e+00,-3.835541755542447362e+00,7.176915106309048609e-01,5.999160702075048590e+00,6.123954378276296850e+00,-3.942832587262437016e+00,6.505938216379314909e+00,4.494098247601695739e+00,4.785774580268421730e+00 -5.751111549400718559e+00,-2.606365152484285463e+00,2.712259588367513796e+00,-3.002823022246964069e+00,-2.501744939896655584e+00,-5.772774549715808945e-02,7.052118926317236358e+00,4.556194363128761360e+00,-4.652746919986393337e+00,8.969322605193864817e+00,3.338448377151302715e+00,3.847965444282013703e+00 -4.412564108108409044e+00,-2.197886789007060138e+00,-5.546879011168117657e-01,-1.945269973105419936e+00,-1.544695102302104939e+00,2.213139708179573395e-01,4.681231933766181186e+00,4.183559885287595037e+00,-2.610332668439564685e+00,7.894978062034469168e+00,3.369021545659768080e+00,4.544912566977936663e+00 
-3.799001718987409681e+00,-2.137054923626828806e+00,-4.457079794441980880e+00,1.226395757840899492e+00,-2.030637715223882367e+00,8.683575779812516271e-02,3.111532903220852297e+00,5.466003107294980623e+00,-2.524675485755766502e+00,4.537313889249457333e+00,2.228549041068663694e+00,6.596994726986956437e+00 -2.235099628414229045e+00,-1.228748151889619278e+00,-6.116059378706807159e+00,2.226761373624125628e+00,-3.167429537191251754e+00,-8.899723588492577941e-02,2.739760515153622400e+00,5.488751323872037347e+00,-3.267075187526824465e+00,3.180835210134046687e+00,3.677150338628789150e+00,3.917626231455117569e+00 -1.584180587095145309e-01,-7.852655966666166609e-01,-1.812135908497642056e+00,1.811381571465613849e+00,-3.868721454666545156e+00,-1.305486931701104858e+00,5.212141177367930922e+00,5.083612625330649948e+00,-4.227537965591766422e+00,2.436317686035841312e+00,4.677542309159430367e+00,2.750568805364736136e+00 --7.766652041322625877e-01,3.440854525018117727e-01,1.291619621175254196e+00,2.833256115689901211e+00,-2.566560115440099654e+00,4.448779037142116533e-01,4.720370609089600933e+00,4.390812785928678963e+00,-4.009024438940519630e+00,4.226275748530937015e+00,5.640016549047624395e+00,4.977936640280186786e+00 --1.666461426108118760e+00,1.785540568414713203e+00,1.805973667163953467e+00,3.702551833821284832e-01,-2.318949619335108014e+00,2.193260776215153474e+00,6.501093851038027616e+00,4.097104561187784988e+00,-3.380629817883459509e+00,4.993965235226390575e+00,5.038063111393355165e+00,5.840642169186212485e+00 --2.410407352193815633e+00,1.827937464828485847e+00,-2.753275686170964853e+00,-1.916569669564210932e+00,-4.188875821867545923e-01,-1.434728816777196769e+00,7.222656113864077909e+00,3.863332368140850015e+00,-3.557266307242549885e+00,4.348824044871342664e+00,5.264486958202707179e+00,4.345164796256563378e+00 
--1.666239550604287967e+00,-6.080677676092067774e-01,-6.016779562497914569e+00,-1.597834161041584933e+00,-1.273227400451313995e+00,-8.484560339955301567e-01,6.599063039814783593e+00,4.026283348567725184e+00,-4.751951651617570604e+00,5.398147519999094257e+00,4.362588794554010718e+00,4.096358997545670455e+00 --1.655180023490592456e+00,-2.622239271382690973e+00,-3.750602427130763861e+00,-1.738716375420507498e+00,-2.284581807216647942e+00,2.062685576388656017e+00,5.465498868731979876e+00,5.571345962541135677e+00,-5.342053777125769898e+00,5.617987145165654006e+00,4.189733685873055613e+00,3.577745219813466715e+00 --2.004071539620203346e+00,-5.295128212567419279e-01,6.047322567267381377e-01,-1.965503770718391552e+00,-2.096982664784231787e+00,1.269103252111400681e-01,6.751060902271214914e+00,5.910331031458312623e+00,-6.200308696641878115e+00,5.715923284776543767e+00,4.030748268924964073e+00,4.600455274492318836e+00 --2.074304412279668330e+00,1.360712017343177704e+00,9.251475424004992343e-01,-2.383440075185936924e-01,-1.083413596973536563e+00,-9.697599544848316544e-01,6.441411052097585355e+00,5.461601793406312133e+00,-5.208872431540394032e+00,3.472725521286179617e+00,4.326691175634730158e+00,4.200401906248015571e+00 --2.612709753918561972e+00,-1.702468264429592981e-01,-3.373551225247852337e+00,7.280554488963433268e-01,2.319313567379491792e-01,8.094720158030519475e-01,6.446830655778663655e+00,5.538956232612101793e+00,-4.981796819407129817e+00,2.406619465325531948e+00,4.497009142654490255e+00,3.232925210389196558e+00 --5.287188450741622248e-01,-6.095815948953714480e-01,-6.264637216247121998e+00,8.323919370189147848e-02,6.473154449270843713e-01,7.835103380678738461e-01,5.984142948224764780e+00,5.229714827305749658e+00,-5.223713406750629673e+00,1.843460253666439286e+00,3.559375867868571408e+00,5.720746949205340215e+00 
-1.427558787308593535e+00,-1.557302632852493318e-01,-4.597806285564792539e+00,-1.317988961575996765e+00,-2.188784664657661416e+00,-1.067990096138581313e+00,4.565907732054268031e+00,3.579763228869906833e+00,-6.308078353552398099e+00,3.078447130025423473e+00,4.832425975578442667e+00,5.011868797950294052e+00 -2.764127317348215929e+00,-2.686501783861953374e+00,-8.781008760933306512e-01,-1.878669373366243400e+00,-1.627916373166508723e+00,-2.543525014225115344e-01,2.474722339252600101e+00,4.017719734625418759e+00,-5.754602740312241949e+00,5.835979325068624490e+00,6.232338477435537705e+00,3.576325938558922779e+00 -1.543226017616111445e+00,-2.610207787022557291e+00,6.607750716330910157e-01,-1.309906651928841148e+00,-1.785712370039657770e+00,1.285798377024816475e+00,2.558548751022986067e+00,5.707837790419183932e+00,-3.563140784313892695e+00,6.480571766391602040e+00,6.253921768985826191e+00,3.881109340114460515e+00 -1.816779137249906473e-01,3.486292015042394832e-01,-2.840740209899343505e+00,3.379650278242448125e-01,-8.198855256058732444e-01,-7.181831308890092025e-01,2.723290370302909214e+00,4.875980960146384824e+00,-2.913466572202565885e+00,4.859607613288064343e+00,5.097350440448430220e+00,3.619998990410613793e+00 --4.815905563663047118e-01,1.928041997345812275e+00,-4.815165219484327963e+00,2.363014709460953711e+00,-2.363669136181059116e+00,-1.316525815903691443e+00,1.826417649917365882e+00,4.500876629176228327e+00,-5.308566677002847456e+00,2.653997718300394659e+00,3.362139387239098554e+00,3.670391412629564254e+00 --7.891240805373735956e-01,3.029472701355132802e+00,-3.758047625023251292e+00,2.288737409872123596e+00,-1.964455413069789991e+00,1.455759453557631034e+00,1.522305917526569630e-01,6.006149330688311139e+00,-7.122756661281025359e+00,3.790575847005788646e+00,2.664256832658303153e+00,3.063278519312641279e+00 
--1.427442697307999886e+00,1.422722195605895079e+00,-3.664943911779358077e-01,6.045914555489940723e-01,-2.351577888029317887e+00,1.028308835477044436e+00,5.875058199155538396e-01,5.281385630745823789e+00,-4.937230596280533845e+00,5.133190913151953083e+00,5.076220016262959156e+00,5.934188040842376388e+00 --2.056090676664585004e+00,5.489115262117172378e-01,-1.190441230292737185e+00,-8.992542381752988145e-01,-2.054960448025206610e+00,-6.049953351586672134e-01,2.227411624213786467e-01,3.636772975192758217e+00,-3.001759071036719462e+00,5.036880885972359501e+00,5.495733033675128887e+00,5.145021568904277309e+00 --9.582695723029276413e-01,1.852360103617486153e+00,-3.063441926180958674e+00,-1.953418429471263984e+00,-2.071154166554376896e+00,-8.119869804624716414e-01,1.230599694952299483e+00,4.824080340429043545e+00,-2.341051401377774788e+00,5.870680100001074209e+00,4.469277296861590365e+00,1.075704134029249914e+00 --1.582376910602060760e-01,2.189889734009317301e+00,-4.747345179362754131e+00,-3.151931765461428170e-01,-4.509538900066920597e-01,-2.835531601968044879e-01,3.784604545548090115e+00,5.318221682007409967e+00,-6.180781124865308840e+00,5.646261655890590703e+00,5.054598689700086744e+00,3.793705868482696086e+00 -6.959286950578849051e-01,6.932436058180182492e-01,-2.546648440975163297e+00,2.062745429629262439e+00,-2.740780178961054858e+00,-3.269385040430629052e-01,6.430060983904160921e+00,4.100766536154779374e+00,-6.582948750427796369e+00,4.814588188534152380e+00,6.198388469805136580e+00,6.932549558323367833e+00 -1.685347181800349592e+00,-1.541952554884603810e+00,-1.621865943421657352e+00,1.936196794573352253e+00,-3.265755400220438531e+00,-9.629537655635460602e-01,8.370956373988427401e+00,3.038401085353682696e+00,-5.588498992214349315e+00,4.263243556096690590e+00,5.805831702509976822e+00,6.569832060772085036e+00 
-5.619516977521019641e-01,-2.315973686782938357e-01,-3.281274591059596180e+00,-2.220697764081181469e-01,-4.628198926143993752e-01,2.151012543333026272e-01,7.745727657241504360e+00,2.685129616860215673e+00,-2.731165510437813637e+00,2.595087672753301433e+00,5.201334641044449647e+00,3.219474986801178940e+00 --1.349268180125596350e+00,1.655809891178515514e+00,-5.231539015833450712e+00,-1.165785651985337124e+00,1.252610657179511477e+00,-4.897586746070303199e-01,8.129168315619942575e+00,2.922562657270406206e+00,-4.085018491545847397e+00,2.790472003681623292e+00,5.383394530934806355e+00,3.586778506060198879e+00 --2.100389066768320045e-01,-1.494682972458926340e+00,-3.584572825600746970e+00,-1.401587609857073424e+00,2.251384033992794853e-01,-5.934183954548902795e-01,7.441139800707048835e+00,3.377305662599261460e+00,-5.913173941404687994e+00,4.668216539632784823e+00,6.737978412044866694e+00,6.493094468850287626e+00 -5.485587735747304849e-01,-2.068089318784516628e+00,-2.243471552217253251e+00,4.381068794323733639e-01,-5.229656391900303625e-01,-1.652041200608006077e+00,7.053254100833267870e+00,4.537533579414166240e+00,-8.793919469542744949e+00,5.559195251967260276e+00,5.876154813035684654e+00,5.726585109741426649e+00 -1.627196506847609525e+00,-5.942359193839552578e-01,-2.204020140251607973e+00,9.430541241391489082e-01,4.904723649761350557e-01,-2.143035632598147555e-01,5.841940752562459593e+00,5.217452086683557866e+00,-7.019671504862639466e+00,4.844290408304894768e+00,4.823209524717708696e+00,5.144027609039376614e+00 -1.046652319512691109e+00,-1.417153156446207873e+00,-3.026556964533265948e+00,5.928952139307764879e-01,1.397674651075489383e+00,1.968044498640865392e+00,5.644068693543877302e+00,4.831472166896567444e+00,-3.728254800048016904e+00,2.836757308576917325e+00,6.056575880500338016e+00,2.951545081531402381e+00 
-8.954242013556753532e-01,-6.795137874097308961e-01,-4.097073586783590571e+00,2.247506211259183928e+00,5.553576947692637766e-01,1.975155891931058783e+00,7.154470288911317866e+00,5.087541508707205118e+00,-1.767290320286429406e+00,4.937862198091091948e+00,4.949358407465503262e+00,2.594159642289888712e+00 -2.024594371026763318e-01,-7.194769430688550127e-01,-3.691388889817485364e+00,1.247845108867251529e+00,-5.504762302810080055e-01,2.528715780911335309e-01,7.945662466242399802e+00,4.113873776637209545e+00,-5.456182505782537717e+00,5.985536488560954993e+00,4.227285404794214330e+00,3.250246637205135958e+00 --1.218874410964381871e+00,-1.758523559724022300e+00,-1.962711156225793285e+00,-6.279759711458275939e-01,-7.583446693541391337e-01,-1.969618325843885298e+00,6.209907318174950674e+00,5.171693078300720003e+00,-8.264793271933067231e+00,6.769228940532217464e+00,4.723855349476290932e+00,4.573308323698332956e+00 --2.148197705717832751e+00,-4.649936575244811854e-01,-1.035598528720010014e+00,-1.263971379281882523e+00,-1.440673375841695358e-01,-7.451968988208304001e-01,5.160471704650104741e+00,8.829560451716705671e+00,-8.539634940588666012e+00,6.936680962381640114e+00,3.235429173871032571e+00,4.781272951395972193e+00 --4.544314759852931651e+00,-8.545989354614917932e-01,-2.071564460370115768e+00,-1.859713842565478448e+00,-4.399036977135762916e-01,2.182765786583092726e+00,4.372573258042296906e+00,6.440370574082606048e+00,-4.042071270042839615e+00,4.212542412812976700e+00,2.781187122013275115e+00,4.479168097457306708e+00 --4.486765194885099284e+00,-2.890474908541997845e-01,-3.304044548606959175e+00,-3.223399180927806196e+00,-1.384748085255008476e+00,-1.868236729530170503e-01,2.959165170569567849e+00,4.369281965863371475e+00,-1.220165504047935556e+00,1.617402290075530047e+00,5.815718881738203727e+00,4.828299432327773921e+00 
--4.170539930389232097e+00,-7.684023046047840921e-01,-3.367635804336703842e+00,-1.180843434519597279e+00,-8.993294612879940697e-01,-7.435772416320318712e-02,1.629105066454298978e+00,4.767559151104483917e+00,-2.219961471007065867e+00,4.903561968792375936e-01,5.598205773850448708e+00,4.913656571866479084e+00 --3.523686550790577687e+00,-3.056594155900486331e+00,-2.051846010921448471e+00,4.101145198083533394e-01,-3.884020341704730872e-01,1.523945307494584789e+00,3.781118856231072911e+00,5.366156690733308920e+00,-5.953217837094241460e+00,5.092017483868774619e+00,4.999319843892386039e+00,6.942958589801051517e+00 --1.231878446629819157e+00,-8.628479855229752582e-01,-1.852650007446889902e-01,4.164919293050345361e-01,-1.818600026041566453e+00,-6.325278618971902356e-01,5.727470622257770927e+00,6.606226639230827757e+00,-5.685092000716645444e+00,8.653686802421903579e+00,5.247871113133442122e+00,4.355589457239776863e+00 -1.956579062862326412e-01,-1.001402484624956291e+00,-1.648018653455823657e+00,2.434358954106604633e-01,-2.561738949798213039e+00,-6.890056798907447932e-01,4.056488502898882409e+00,6.690025192103396634e+00,-2.331567658168782309e+00,5.552714407733484236e+00,6.007846107145587311e+00,2.146593937367936356e+00 --1.337066816391997426e-01,-2.047520375519126468e+00,-3.749752336434905686e+00,-3.931332829248479488e-01,-1.077515842127685142e+00,1.542742312933534254e+00,2.580269691490353345e+00,5.901132988743353458e+00,-7.691250593657450452e-01,2.210681287616310442e+00,6.434215027836306611e+00,4.875300410899473746e+00 -9.668771311119847001e-01,-6.273495735455852929e-01,-3.464873707554440241e+00,3.764108608449281412e-01,-9.895920052136639722e-01,1.161514522980211739e+00,3.628069278143565946e+00,6.131490020509800409e+00,-2.807926682618692382e+00,3.781176319494426785e+00,5.644086292870081678e+00,6.295275496957129846e+00 
-1.023428666895920269e+00,-1.490947232034514069e+00,-1.063370549377946617e+00,2.724388074533872217e-01,-4.805155746626946778e-01,-1.755875309538191154e+00,3.654881850994777803e+00,4.689417980765189675e+00,-5.528393825976277043e+00,4.667400875369982494e+00,5.661798326079645172e+00,3.049022038497731568e+00 -2.150110557047250293e+00,-1.621289848912258691e+00,-2.964810620807731834e-01,-1.087686635188899409e+00,-9.241544841718278835e-01,6.180224836607822736e-01,2.664532191692702057e+00,4.131871114110043486e+00,-5.334562974188928663e+00,4.024744383922982394e+00,6.436296956384975232e+00,3.522493084638647964e+00 -1.266245098542744252e+00,-8.409693056241449582e-01,-4.467070844934681872e-01,-9.450716491339776759e-01,-2.703589839154058039e-01,-1.247291591611538131e+00,2.352935829618759911e+00,5.294486498515911244e+00,-2.184654645284031638e+00,4.832681676656637393e+00,6.616047156565080023e+00,5.649768473177917194e+00 -1.256567595988526742e+00,-5.209123436247663497e-01,-3.554136627479212329e+00,9.851981865032901631e-01,6.066463566938560259e-01,-2.551671700953049182e+00,1.672367682222062335e+00,2.991517995066215185e+00,-1.891614166973342392e+00,5.547414844407583701e+00,7.085151694555714030e+00,4.583695315487106292e+00 -2.417146698983693387e+00,3.423420420221746086e-01,-5.468012133391135343e+00,1.912905303796538226e+00,6.560968323400628854e-01,1.772509328255130079e+00,1.402301657971260163e+00,3.194428186749342480e+00,-4.548483482978879167e+00,5.542356401725575310e+00,6.663883921111593445e+00,4.853820641199991570e+00 -1.999619757842334256e+00,7.041442580822796504e-01,-2.990899524647011809e+00,-1.416389906301699364e-01,1.065260057237635394e+00,2.783868292014194612e+00,2.188410837817223786e+00,5.161519878274964057e+00,-7.214884734592697413e+00,3.448410263284237054e+00,6.816941834764618235e+00,6.767420930176283989e+00 
-9.646047438525601203e-01,3.112914907040449375e-01,-7.596712250334135552e-01,-1.123555366811338718e+00,-1.525906179172539012e+00,-4.163455596841435513e-01,3.793433139321964020e+00,5.691750360447885626e+00,-5.329084179349631967e+00,2.839667799187656883e+00,6.447140902238344573e+00,5.071253234787926445e+00 -1.035829306169633313e+00,-2.566617040964844776e-01,6.788594751020975426e-01,-1.970825217969614851e+00,-7.148301052070493711e-01,-2.998344115800775533e+00,5.006537963053346196e+00,6.376420985584982049e+00,-2.529233866513511231e+00,1.454425914995181479e+00,7.646398408348217046e+00,3.384971377637188361e+00 --2.080360628734745365e-02,-8.295629174762977343e-01,-2.782322861236617317e+00,-1.588768468010897106e+00,3.621706258074113283e-01,-9.711881462604883275e-01,3.767405040453775111e+00,4.703163164051236578e+00,-1.541082078909212782e-01,3.066442966190603414e+00,7.223949483958030271e+00,5.153175615421303313e+00 --2.176668615489226077e+00,-2.628618509971152317e-01,-4.108035577763891943e+00,1.697196899449040952e+00,-3.187822838173301143e+00,-2.520907834412113857e-01,4.623391059460301378e+00,4.388386902732331230e+00,-2.616420880557069673e+00,1.018395404878577715e+01,5.566905158542308385e+00,5.490643348955582148e+00 --1.645649893157671961e+00,-1.107320912818145486e-01,-3.029372871602017181e+00,2.739496055782805861e+00,-4.118440239559833849e+00,1.035907301016749438e+00,4.832636701292732262e+00,3.721206062337678144e+00,-4.841557479246951345e+00,9.145626172885091165e+00,5.354959420757586130e+00,4.607999112388112550e+00 -7.534819207451137757e-03,-1.807902225159827014e-01,-1.471331302585484835e+00,-2.877547850274729813e-01,-2.540128763284053015e+00,1.359912408171636500e+00,4.065396168114132180e+00,3.073067398277013140e+00,-5.552206172695049347e+00,5.626049868871119131e-01,8.077022293538270503e+00,2.753384930472553282e+00 
-1.471081049679962671e+00,-7.594261866225597846e-01,-3.180930202323710709e-01,-4.211102352125222659e+00,-1.921930323566371612e+00,-4.543019441167683858e-01,4.572059863224049003e+00,6.011934241154982317e+00,-1.318420469346107460e+00,-6.846078514769136092e-01,6.252954475747578122e+00,6.060362421801588795e+00 -1.581846486994021195e+00,-1.260619695530512008e+00,-1.641419606323502034e+00,-3.646624699638710609e+00,-2.492475649143024885e+00,-7.179682574043153576e-01,3.321180551913100132e+00,6.104450065015529781e+00,3.719103162558630160e-01,4.445272293410904574e+00,4.173886015269443917e+00,6.095660541184315662e+00 -8.681041898081029995e-01,-2.368645667974292213e+00,-2.154724203230642399e+00,6.095112830632625212e-01,-3.475142993412908332e+00,-1.507253315314915909e-01,4.119815713223559328e+00,3.784732117363432735e+00,-1.764107174036215753e-01,4.822307205518326256e+00,5.689716050834727312e+00,2.054433501081257329e+00 -7.464971289730815140e-01,-9.377553478592643987e-01,-2.369222972776462655e+00,3.080844885536041744e+00,-2.529406287482803606e+00,1.845371391610271861e+00,3.968941913572073155e+00,2.010025383505255725e+00,-2.710883466293171118e+00,5.545202534322687171e+00,6.088343999382616722e+00,4.926708257184033712e+00 --2.586553280014266543e-01,2.427463824807717607e+00,-2.090201914853409448e+00,2.880203708208450841e+00,-1.997387761319865174e+00,-1.450964694687728596e-01,4.754551282526016109e+00,8.089492130796367420e-01,-1.601383943903982754e+00,5.737130497143407837e+00,4.782675577584496018e+00,3.677885129263262609e+00 --2.248670046397965194e+00,1.597540861553059521e+00,-8.412007692607401665e-01,-6.086934482895656195e-01,-7.480078423641320917e-01,1.489591720044582956e+00,6.654717642939099065e+00,3.186448218260450993e+00,4.658021322860061986e-01,4.823697145528843322e+00,3.414984659017686663e+00,5.487437335735822153e+00 
--3.348281158520842737e+00,-5.568998104568737562e-01,-6.790211283644291829e-01,-2.212927024942084930e+00,-1.704558916373672073e+00,2.041808125015635689e+00,5.846991335580540294e+00,3.337831596232551767e+00,2.098744541113709783e+00,3.277236079611692610e+00,3.458622460117518838e+00,7.928515531651342485e+00 --1.442897591944459768e+00,6.003522344310079362e-01,-1.966815026637586783e+00,1.775244020653421462e+00,-2.148144986929363398e+00,-2.434428890479605201e+00,5.420332938498822806e+00,3.619542836945243813e+00,-2.954857026603994186e-01,-8.673261466362269800e-03,4.306900508165536046e+00,4.791877210409132282e+00 --1.155569605286546508e+00,1.618056367957397379e+00,-2.721818127545462040e+00,2.202103774385236967e+00,-1.438884608281589372e+00,-2.209300241018751887e+00,5.135802701613228827e+00,5.306944258591969721e+00,-3.018768166747916482e+00,2.699741853299243211e+00,4.617001162087944088e+00,2.222352491173781530e+00 --3.488644218277279663e+00,-1.118599074445602914e+00,-3.376181105811172145e+00,9.934883595704446790e-02,-8.931507530256685978e-01,8.389966878750507728e-01,5.211812976787111751e+00,6.012687555812719964e+00,-1.814331686260398868e+00,5.796907755573260879e+00,5.797227449713707514e+00,3.538194632175998056e+00 --4.525915842193184879e+00,-2.527108622493257961e+00,-9.006577877899053508e-01,-8.372490761748260235e-01,-6.829973961814593819e-01,-6.776843756425867671e-01,5.850405858604291964e+00,5.002963190238576274e+00,2.325373448708866864e+00,7.853608908550089751e+00,5.246662491251035121e+00,4.548401650003722096e+00 --3.528879989993249922e+00,-4.378075977148364051e-01,-2.926237906261323074e-01,-1.590005961376358634e+00,-1.022521221361091692e+00,8.323101314925480665e-01,5.232798442089570123e+00,4.633711673012578913e+00,4.109426929386741811e+00,8.901357845062772611e+00,5.209590230851312675e+00,4.065213765574621441e+00 
--2.562622331885585414e+00,3.925845799236385436e-01,-1.507726935910972177e+00,-8.850671985609452896e-01,-1.397999139484212172e+00,-6.484968177871324180e-01,3.983304983396819043e+00,4.828507420812822915e+00,5.769515094244934694e-01,7.067279836372568624e+00,6.981173134628340549e+00,4.550353080264919470e+00 --1.856378488901909707e+00,-9.512754879744241832e-01,-4.717032237780614423e+00,-9.504417158092401952e-01,-6.267080868992040976e-01,-1.558303820395119388e+00,3.427519950071115318e+00,5.866323724575988052e+00,-3.580441950275538687e+00,4.626256440073035492e+00,4.503333861719587716e+00,4.957952609674167732e+00 --2.736969358397962626e+00,-1.946944243544922459e+00,-3.753120219952542413e+00,-6.556130942003135509e-03,-1.224376561236242056e+00,1.725091624855118466e-01,2.406053051552568434e+00,4.804445006333999402e+00,-2.561227109038438421e+00,3.115903428004851872e+00,3.301671559239126807e+00,5.966307568101433034e+00 --2.561640507428150748e+00,-5.380962687630521657e-01,-2.159943664332667268e+00,1.762954755872925938e+00,-1.427884855214213378e+00,-1.638143791690184425e+00,2.398718774257984521e+00,2.981422967539768987e+00,3.296672324359811945e+00,3.678250983605062974e+00,3.692330793902033559e+00,3.753516855710441913e+00 --9.913390773313958126e-01,-2.876758355812703005e-01,-1.429933999842837533e+00,1.642212480845887779e+00,-3.644403502179132115e-01,1.377236017855435035e+00,1.477832309500654695e+00,4.353594444037942601e+00,5.389545340331115320e+00,4.054115218922375696e+00,6.148688400590705427e+00,3.721102504340284334e+00 --1.056769796014828788e+00,-2.885595533548598324e-01,-2.794503445044821177e+00,1.083090011598699665e+00,-1.380432962374760208e+00,2.497806290360156112e+00,4.167258891973620649e+00,5.486485952944832434e+00,2.519490343192760662e-01,3.409630610888273239e+00,6.497457287133940440e+00,4.337604040259845029e+00 
-2.274903620720447606e-02,1.178709472965589367e+00,-2.191907922540884179e+00,1.147040859111712718e+00,-1.401092978881083306e+00,-3.073230208175671940e+00,3.143035241306364469e+00,6.164143814132094334e+00,-4.478989164556972824e+00,3.865238097566201958e+00,4.555747830634098605e+00,2.919483985102220736e+00 --7.017977424499139971e-01,5.864426458347605786e-01,-1.052410741057270505e+00,-1.888349389452816052e+00,2.640406417979335174e-02,-1.251767083818635440e+00,1.900322526990272820e+00,4.288786027707170767e+00,-3.078458539960416296e+00,4.917959417157974578e+00,4.466967039398555883e+00,5.277507025041583866e+00 --4.761575079868316518e-01,2.033879914173333692e+00,-2.119219470474183797e+00,-1.503623604112065859e+00,1.245419952932540264e+00,8.331834933385302566e-01,3.225788328670576632e+00,2.650018181237939530e+00,2.432707009371107176e+00,3.829198120567822272e+00,6.021729223149659305e+00,6.293117544191258794e+00 --4.431170435912721306e-01,3.345382851260067181e+00,-3.650137045873921515e+00,-1.583532031681139873e+00,-1.326217744881489358e-01,9.462570124815141703e-01,4.987162622205645768e+00,4.128336989800555301e+00,4.831569541643757404e+00,3.211246100750840160e+00,5.343902502998334469e+00,4.456169118280132402e+00 -2.268017495856458066e-01,2.729099977591017634e+00,-4.479510528684833126e+00,-1.726598591179097086e-01,8.178885935778972271e-01,1.721393428966342398e+00,5.703450235584239358e+00,1.609590439570473208e+00,8.657061566026489352e-01,5.832052241539065207e+00,3.275055400053695553e+00,5.038320002771330408e+00 -6.307699118156784301e-02,3.615462745775235298e+00,-1.710251122774079313e+00,3.217555815846976586e+00,1.736861623118536535e+00,4.848628069717411115e-01,4.543319198418915406e+00,2.863637671601732215e+00,-2.504718404866139281e+00,8.080870330493329590e+00,3.499845759335292961e+00,6.471067310492218283e+00 From d87fb4ac409772b0c4428ebbcc2921ec015233f5 Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Sat, 4 Apr 2026 20:53:00 +1100 Subject: [PATCH 16/20] updates --- 
lectures/blackwell_kihlstrom.md | 2 +- lectures/gee_simulation_check.txt | 0 lectures/graph.txt | 100 ------ lectures/organization_capital.md | 577 ++++++++++++------------------ lectures/theil_2.md | 2 +- 5 files changed, 236 insertions(+), 445 deletions(-) delete mode 100644 lectures/gee_simulation_check.txt delete mode 100644 lectures/graph.txt diff --git a/lectures/blackwell_kihlstrom.md b/lectures/blackwell_kihlstrom.md index 1a605a8ec..ad1cfb9d3 100644 --- a/lectures/blackwell_kihlstrom.md +++ b/lectures/blackwell_kihlstrom.md @@ -82,7 +82,7 @@ import numpy as np import matplotlib.pyplot as plt from scipy.optimize import minimize -np.random.seed(42) +np.random.seed(0) ``` ## Experiments and stochastic transformations diff --git a/lectures/gee_simulation_check.txt b/lectures/gee_simulation_check.txt deleted file mode 100644 index e69de29bb..000000000 diff --git a/lectures/graph.txt b/lectures/graph.txt deleted file mode 100644 index 9cb9e2e33..000000000 --- a/lectures/graph.txt +++ /dev/null @@ -1,100 +0,0 @@ -node0, node1 0.04, node8 11.11, node14 72.21 -node1, node46 1247.25, node6 20.59, node13 64.94 -node2, node66 54.18, node31 166.80, node45 1561.45 -node3, node20 133.65, node6 2.06, node11 42.43 -node4, node75 3706.67, node5 0.73, node7 1.02 -node5, node45 1382.97, node7 3.33, node11 34.54 -node6, node31 63.17, node9 0.72, node10 13.10 -node7, node50 478.14, node9 3.15, node10 5.85 -node8, node69 577.91, node11 7.45, node12 3.18 -node9, node70 2454.28, node13 4.42, node20 16.53 -node10, node89 5352.79, node12 1.87, node16 25.16 -node11, node94 4961.32, node18 37.55, node20 65.08 -node12, node84 3914.62, node24 34.32, node28 170.04 -node13, node60 2135.95, node38 236.33, node40 475.33 -node14, node67 1878.96, node16 2.70, node24 38.65 -node15, node91 3597.11, node17 1.01, node18 2.57 -node16, node36 392.92, node19 3.49, node38 278.71 -node17, node76 783.29, node22 24.78, node23 26.45 -node18, node91 3363.17, node23 16.23, node28 55.84 -node19, node26 
20.09, node20 0.24, node28 70.54 -node20, node98 3523.33, node24 9.81, node33 145.80 -node21, node56 626.04, node28 36.65, node31 27.06 -node22, node72 1447.22, node39 136.32, node40 124.22 -node23, node52 336.73, node26 2.66, node33 22.37 -node24, node66 875.19, node26 1.80, node28 14.25 -node25, node70 1343.63, node32 36.58, node35 45.55 -node26, node47 135.78, node27 0.01, node42 122.00 -node27, node65 480.55, node35 48.10, node43 246.24 -node28, node82 2538.18, node34 21.79, node36 15.52 -node29, node64 635.52, node32 4.22, node33 12.61 -node30, node98 2616.03, node33 5.61, node35 13.95 -node31, node98 3350.98, node36 20.44, node44 125.88 -node32, node97 2613.92, node34 3.33, node35 1.46 -node33, node81 1854.73, node41 3.23, node47 111.54 -node34, node73 1075.38, node42 51.52, node48 129.45 -node35, node52 17.57, node41 2.09, node50 78.81 -node36, node71 1171.60, node54 101.08, node57 260.46 -node37, node75 269.97, node38 0.36, node46 80.49 -node38, node93 2767.85, node40 1.79, node42 8.78 -node39, node50 39.88, node40 0.95, node41 1.34 -node40, node75 548.68, node47 28.57, node54 53.46 -node41, node53 18.23, node46 0.28, node54 162.24 -node42, node59 141.86, node47 10.08, node72 437.49 -node43, node98 2984.83, node54 95.06, node60 116.23 -node44, node91 807.39, node46 1.56, node47 2.14 -node45, node58 79.93, node47 3.68, node49 15.51 -node46, node52 22.68, node57 27.50, node67 65.48 -node47, node50 2.82, node56 49.31, node61 172.64 -node48, node99 2564.12, node59 34.52, node60 66.44 -node49, node78 53.79, node50 0.51, node56 10.89 -node50, node85 251.76, node53 1.38, node55 20.10 -node51, node98 2110.67, node59 23.67, node60 73.79 -node52, node94 1471.80, node64 102.41, node66 123.03 -node53, node72 22.85, node56 4.33, node67 88.35 -node54, node88 967.59, node59 24.30, node73 238.61 -node55, node84 86.09, node57 2.13, node64 60.80 -node56, node76 197.03, node57 0.02, node61 11.06 -node57, node86 701.09, node58 0.46, node60 7.01 -node58, node83 556.70, node64 
29.85, node65 34.32 -node59, node90 820.66, node60 0.72, node71 0.67 -node60, node76 48.03, node65 4.76, node67 1.63 -node61, node98 1057.59, node63 0.95, node64 4.88 -node62, node91 132.23, node64 2.94, node76 38.43 -node63, node66 4.43, node72 70.08, node75 56.34 -node64, node80 47.73, node65 0.30, node76 11.98 -node65, node94 594.93, node66 0.64, node73 33.23 -node66, node98 395.63, node68 2.66, node73 37.53 -node67, node82 153.53, node68 0.09, node70 0.98 -node68, node94 232.10, node70 3.35, node71 1.66 -node69, node99 247.80, node70 0.06, node73 8.99 -node70, node76 27.18, node72 1.50, node73 8.37 -node71, node89 104.50, node74 8.86, node91 284.64 -node72, node76 15.32, node84 102.77, node92 133.06 -node73, node83 52.22, node76 1.40, node90 243.00 -node74, node81 1.07, node76 0.52, node78 8.08 -node75, node92 68.53, node76 0.81, node77 1.19 -node76, node85 13.18, node77 0.45, node78 2.36 -node77, node80 8.94, node78 0.98, node86 64.32 -node78, node98 355.90, node81 2.59 -node79, node81 0.09, node85 1.45, node91 22.35 -node80, node92 121.87, node88 28.78, node98 264.34 -node81, node94 99.78, node89 39.52, node92 99.89 -node82, node91 47.44, node88 28.05, node93 11.99 -node83, node94 114.95, node86 8.75, node88 5.78 -node84, node89 19.14, node94 30.41, node98 121.05 -node85, node97 94.51, node87 2.66, node89 4.90 -node86, node97 85.09 -node87, node88 0.21, node91 11.14, node92 21.23 -node88, node93 1.31, node91 6.83, node98 6.12 -node89, node97 36.97, node99 82.12 -node90, node96 23.53, node94 10.47, node99 50.99 -node91, node97 22.17 -node92, node96 10.83, node97 11.24, node99 34.68 -node93, node94 0.19, node97 6.71, node99 32.77 -node94, node98 5.91, node96 2.03 -node95, node98 6.17, node99 0.27 -node96, node98 3.32, node97 0.43, node99 5.87 -node97, node98 0.30 -node98, node99 0.33 -node99, diff --git a/lectures/organization_capital.md b/lectures/organization_capital.md index efda3e91e..f6aa4e467 100644 --- a/lectures/organization_capital.md +++ 
b/lectures/organization_capital.md @@ -19,12 +19,12 @@ kernelspec: ## Overview This lecture describes a theory of **organization capital** proposed by -{cite}`Prescott_Visscher_1980`. +{cite:t}`Prescott_Visscher_1980`. Prescott and Visscher define organization capital as information that a firm accumulates about its employees, teams, and production processes. -This information is an **asset** to the firm because it affects the production possibility set +This information is an *asset* to the firm because it affects the production possibility set and is produced jointly with output. Costs of adjusting the stock of organization capital constrain the firm's growth rate, @@ -35,17 +35,18 @@ providing an explanation for The paper offers three examples of organization capital: -* **Personnel information**: knowledge about the match between workers and tasks -* **Team information**: knowledge about how well groups of workers mesh -* **Firm-specific human capital**: skills of employees enhanced by on-the-job training +* *Personnel information*: knowledge about the match between workers and tasks +* *Team information*: knowledge about how well groups of workers mesh +* *Firm-specific human capital*: skills of employees enhanced by on-the-job training In each case, the investment possibilities lead firms to grow at a common rate, yielding constant returns to scale together with increasing costs of rapid size adjustment. ```{note} -The theory is related to ideas of {cite}`Coase_1937` and {cite}`Williamson_1975` about the nature of the firm. +The theory is related to ideas of {cite:t}`Coase_1937` and {cite:t}`Williamson_1975` about the nature of the firm. + Prescott and Visscher stress the firm's role as a storehouse of information and argue that -incentives within the firm are created for efficient accumulation and use of that information. +incentives within the firm are created for efficient accumulation and use of that information. 
``` Let's start with some imports: @@ -57,7 +58,7 @@ from scipy.stats import norm from scipy.optimize import brentq ``` -## The Basic Idea +## The basic idea The firm is a storehouse of information. @@ -68,11 +69,11 @@ size distribution. The key insight: the process by which information is accumulated naturally leads to -1. **constant returns to scale**, and -2. **increasing costs to rapid firm size adjustment** +1. *constant returns to scale*, and +2. *increasing costs to rapid firm size adjustment* Constant returns to scale explain the absence of an observed unique optimum firm size -(see {cite}`Stigler_1958`). +(see {cite:t}`Stigler_1958`). Without costs of adjustment, the pattern of investment by firms in the face of a change in market demand would exhibit @@ -84,7 +85,7 @@ usurping all profitable investments as they appear, thus implying monopoly more prevalent than it is. -## Personnel Information as Organization Capital +## Personnel information as organization capital ```{index} single: Organization Capital; Personnel Information ``` @@ -108,15 +109,16 @@ $$ $$ When a worker is hired from the labor pool, neither the worker nor the employer knows $\theta$. + Both know only the population distribution. -### Three Tasks +### Three tasks If $q$ units of output are produced, assume: -* $\varphi_1 q$ workers are assigned to **task 1** (screening) -* $\varphi q$ workers are assigned to **task 2** -* the remaining workers are assigned to **task 3** +* $\varphi_1 q$ workers are assigned to *task 1* (screening) +* $\varphi q$ workers are assigned to *task 2* +* the remaining workers are assigned to *task 3* where $\varphi_1 + 2\varphi = 1$. @@ -125,7 +127,7 @@ The fixed coefficients technology requires a constant ratio between the number o personnel in jobs 2 and 3 and the number assigned to job 1. ``` -For task 1, the screening task, per unit cost of production is **invariant** to the $\theta$-values of the individuals assigned. 
+For task 1, the screening task, per unit cost of production is *invariant* to the $\theta$-values of the individuals assigned. However, the larger a worker's $\theta$, the larger is his product in task 2 relative to his product in task 3. @@ -135,7 +137,7 @@ Consequently: * a worker with a highly positive $\theta$ is much better suited for task 2 * a worker with a highly negative $\theta$ is much better suited for task 3 -### Bayesian Learning +### Bayesian learning Performance in tasks 2 or 3 cannot be observed at the individual level. @@ -150,15 +152,15 @@ $$ (eq:signal) where $\epsilon_{it} \sim N(0, 1)$ are independently distributed over both workers $i$ and periods $t$. -After $n$ observations on a worker in the screening job, the **posterior distribution** of $\theta$ is normal with +After $n$ observations on a worker in the screening job, the *posterior distribution* of $\theta$ is normal with -**posterior mean:** +*posterior mean:* $$ m = \frac{1}{\pi + n} \sum_{k=1}^{n} z_k $$ (eq:post_mean) -**posterior precision:** +*posterior precision:* $$ h = \pi + n @@ -170,20 +172,6 @@ Knowledge of an individual is thus completely characterized by the pair $(m, h)$ def bayesian_update(z_observations, prior_precision): """ Compute posterior mean and precision after observing signals. 
- - Parameters - ---------- - z_observations : array_like - Observed signals z_1, ..., z_n - prior_precision : float - Precision π of the prior distribution - - Returns - ------- - m : float - Posterior mean - h : float - Posterior precision """ n = len(z_observations) h = prior_precision + n @@ -194,48 +182,45 @@ def bayesian_update(z_observations, prior_precision): Let's visualize how the posterior evolves as we observe a worker whose true $\theta = 0.8$: ```{code-cell} ipython3 -np.random.seed(42) - -# True worker type -theta_true = 0.8 +--- +mystnb: + figure: + caption: Posterior mean convergence and uncertainty + name: fig-posterior-evolution +--- +np.random.seed(0) -# Prior precision -pi = 1.0 +θ_true = 0.8 +π = 1.0 -# Generate signals T = 20 -epsilons = np.random.randn(T) -z_signals = theta_true + epsilons +ε = np.random.randn(T) +z_signals = θ_true + ε -# Track posterior evolution posterior_means = [] posterior_stds = [] for n in range(1, T + 1): - m, h = bayesian_update(z_signals[:n], pi) + m, h = bayesian_update(z_signals[:n], π) posterior_means.append(m) posterior_stds.append(1 / np.sqrt(h)) fig, axes = plt.subplots(1, 2, figsize=(12, 5)) -# Plot posterior mean convergence ax = axes[0] -ax.plot(range(1, T + 1), posterior_means, 'b-o', markersize=4, +ax.plot(range(1, T + 1), posterior_means, '-o', markersize=4, lw=2, label='Posterior mean $m$') -ax.axhline(theta_true, color='r', linestyle='--', - label=fr'True $\theta = {theta_true}$') -ax.set_xlabel('Number of observations $n$') -ax.set_ylabel('Posterior mean $m$') -ax.set_title('Convergence of Posterior Mean') +ax.axhline(θ_true, color='r', linestyle='--', + label=fr'True $\theta = {θ_true}$') +ax.set_xlabel('number of observations $n$') +ax.set_ylabel('posterior mean $m$') ax.legend() -# Plot posterior standard deviation ax = axes[1] -ax.plot(range(1, T + 1), posterior_stds, 'g-o', markersize=4, - label='Posterior std $1/\sqrt{h}$') -ax.set_xlabel('Number of observations $n$') -ax.set_ylabel('Posterior 
standard deviation') -ax.set_title('Shrinking Posterior Uncertainty') +ax.plot(range(1, T + 1), posterior_stds, '-o', markersize=4, lw=2, + label=r'Posterior std $1/\sqrt{h}$') +ax.set_xlabel('number of observations $n$') +ax.set_ylabel('posterior standard deviation') ax.legend() plt.tight_layout() @@ -245,7 +230,7 @@ plt.show() As the number of screening observations $n$ increases, the posterior mean converges to the true $\theta$, and the posterior uncertainty shrinks at rate $1/\sqrt{n}$. -### Per Unit Costs of Production +### Per unit costs of production Under the nonsequential assignment rule, employees with the greatest seniority are assigned to jobs 2 and 3, while newer employees remain in the screening task. @@ -268,47 +253,40 @@ $$ (eq:cost_n) where $c = c_1 + c_2 + c_3$ and $0.7978 = 2 \int_0^{\infty} \frac{t}{\sqrt{2\pi}} e^{-t^2/2} dt$. ```{note} -The constant $0.7978 \approx \sqrt{2/\pi}$ is twice the mean of the half-normal distribution. +The constant $0.7978 \approx \sqrt{2/\pi}$ is the mean of the standard half-normal distribution. + It arises from computing $E[\theta \mid m > 0] - E[\theta \mid m \leq 0]$ for a normal distribution. ``` -The function $c(n)$ decreases at a **decreasing rate** in $n$: more screening observations -reduce costs but with diminishing returns. +The function $c(n)$ decreases at a *decreasing rate* in $n$. + +More screening observations reduce costs but with diminishing returns. ```{code-cell} ipython3 -def cost_per_unit(n_vals, pi, c_bar=1.0): +--- +mystnb: + figure: + caption: Per unit costs by screening time + name: fig-cost-screening +--- +def cost_per_unit(n_vals, π, c_bar=1.0): """ - Per unit cost of production as a function of screening periods n. - - Parameters - ---------- - n_vals : array_like - Number of screening periods - pi : float - Prior precision - c_bar : float - Base cost c = c1 + c2 + c3 - - Returns - ------- - costs : array - Per unit costs c(n) + Per unit cost as a function of screening periods n. 
""" n_vals = np.asarray(n_vals, dtype=float) - return c_bar - 0.7978 * n_vals / (pi * (pi + n_vals)) + return c_bar - 0.7978 * n_vals / (π * (π + n_vals)) fig, ax = plt.subplots(figsize=(10, 6)) n_vals = np.linspace(0.1, 50, 200) -for pi in [0.5, 1.0, 2.0, 5.0]: - costs = cost_per_unit(n_vals, pi) - ax.plot(n_vals, costs, label=fr'$\pi = {pi}$') +for π in [0.5, 1.0, 2.0, 5.0]: + costs = cost_per_unit(n_vals, π) + ax.plot(n_vals, costs, lw=2, label=fr'$\pi = {π}$') -ax.set_xlabel('Screening periods $n$') -ax.set_ylabel('Per unit cost $c(n)$') -ax.set_title('Per Unit Costs Decrease with Screening Time') +ax.set_xlabel('screening periods $n$') +ax.set_ylabel('per unit cost $c(n)$') ax.legend() ax.set_xlim(0, 50) plt.tight_layout() @@ -321,10 +299,10 @@ The figure shows that: * the decrease is at a declining rate (diminishing returns to screening) * for smaller prior precision $\pi$ (more initial uncertainty about worker types), the gains from screening are larger -This diminishing-returns structure is the source of the **increasing costs of rapid adjustment**. +This diminishing-returns structure is the source of the *increasing costs of rapid adjustment*. -### Growth Rate and Screening Time +### Growth rate and screening time The greater the growth rate, the smaller must be $n$ --- the time spent in the screening task before assignment to job 2 or 3. @@ -352,104 +330,94 @@ n(\gamma) = \frac{\log(2\varphi) - \log(\varphi_1 + 2\varphi)}{\log(1 - \rho) - $$ (eq:n_gamma) ```{code-cell} ipython3 -def screening_time(gamma, rho, phi1, phi): +--- +mystnb: + figure: + caption: Screening time vs. growth rate + name: fig-screening-growth +--- +def screening_time(γ, ρ, φ1, φ): """ - Compute the screening time n as a function of growth rate γ. 
- - Parameters - ---------- - gamma : array_like - Growth rate of output - rho : float - Quit rate - phi1 : float - Fraction of workers in task 1 per unit output - phi : float - Fraction of workers in each of tasks 2, 3 per unit output - - Returns - ------- - n : array - Screening periods before assignment + Screening time n as a function of growth rate γ. """ - gamma = np.asarray(gamma, dtype=float) - numerator = np.log(2 * phi) - np.log(phi1 + 2 * phi) - denominator = np.log(1 - rho) - np.log(1 + gamma) + γ = np.asarray(γ, dtype=float) + numerator = np.log(2 * φ) - np.log(φ1 + 2 * φ) + denominator = np.log(1 - ρ) - np.log(1 + γ) return numerator / denominator - 1 -# Parameters -rho = 0.1 # quit rate -phi1 = 0.5 # fraction in screening -phi = 0.25 # fraction in each of tasks 2, 3 +ρ = 0.1 +φ1 = 0.5 +φ = 0.25 -gamma_vals = np.linspace(-0.05, 0.30, 200) +γ_vals = np.linspace(-0.05, 0.30, 200) -# Filter valid range: γ > -ρ and ensure n > 0 -valid = gamma_vals > -rho -gamma_valid = gamma_vals[valid] -n_vals = screening_time(gamma_valid, rho, phi1, phi) -# Only keep non-negative n +valid = γ_vals > -ρ +γ_valid = γ_vals[valid] +n_vals = screening_time(γ_valid, ρ, φ1, φ) mask = n_vals > 0 -gamma_plot = gamma_valid[mask] +γ_plot = γ_valid[mask] n_plot = n_vals[mask] fig, ax = plt.subplots(figsize=(10, 6)) -ax.plot(gamma_plot, n_plot, 'b-', linewidth=2) -ax.set_xlabel(r'Growth rate $\gamma$') -ax.set_ylabel(r'Screening periods $n(\gamma)$') -ax.set_title('Faster Growth Means Less Screening Time') -ax.set_xlim(gamma_plot[0], gamma_plot[-1]) +ax.plot(γ_plot, n_plot, lw=2) +ax.set_xlabel(r'growth rate $\gamma$') +ax.set_ylabel(r'screening periods $n(\gamma)$') +ax.set_xlim(γ_plot[0], γ_plot[-1]) plt.tight_layout() plt.show() ``` -The figure shows the key trade-off: **faster growth forces shorter screening periods**. +The figure shows the key trade-off: *faster growth forces shorter screening periods*. 
When growth is rapid, new workers must be promoted from the screening task to productive tasks more quickly, so less information is gathered about each worker before assignment. -### Combined Effect: Growth Rate and Per Unit Costs +### Combined effect: growth rate and per unit costs Composing the functions $c(n)$ and $n(\gamma)$ reveals how per unit costs depend on the growth rate: ```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Per unit costs vs. growth rate + name: fig-cost-growth +--- fig, ax = plt.subplots(figsize=(10, 6)) -pi = 1.0 +π = 1.0 c_bar = 1.0 -# Compute per unit costs as function of growth rate -n_of_gamma = screening_time(gamma_plot, rho, phi1, phi) -costs_of_gamma = cost_per_unit(n_of_gamma, pi, c_bar) +n_of_γ = screening_time(γ_plot, ρ, φ1, φ) +costs_of_γ = cost_per_unit(n_of_γ, π, c_bar) -ax.plot(gamma_plot, costs_of_gamma, 'r-', linewidth=2) -ax.set_xlabel(r'Growth rate $\gamma$') -ax.set_ylabel(r'Per unit cost $c(n(\gamma))$') -ax.set_title('Per Unit Costs Increase with Growth Rate') -ax.set_xlim(gamma_plot[0], gamma_plot[-1]) +ax.plot(γ_plot, costs_of_γ, lw=2) +ax.set_xlabel(r'growth rate $\gamma$') +ax.set_ylabel(r'per unit cost $c(n(\gamma))$') +ax.set_xlim(γ_plot[0], γ_plot[-1]) plt.tight_layout() plt.show() ``` -This establishes the key result: **increasing costs of rapid adjustment arise endogenously** +This establishes the key result: *increasing costs of rapid adjustment arise endogenously* from the trade-off between screening and growth. The faster the firm grows, the less time it has to screen workers, the poorer the match between workers and tasks, and the higher the per unit production costs. 
-## Industry Equilibrium +## Industry equilibrium ```{index} single: Organization Capital; Industry Equilibrium ``` Firm growth rates are independent of firm size in this model because the mathematical structure of the technology constraint is the same as that -considered in {cite}`lucas1967adjustment`, except that the stock of organization capital +considered in {cite:t}`lucas1967adjustment`, except that the stock of organization capital is a vector rather than a scalar. The technology set facing price-taking firms is a **convex cone**: there are @@ -457,7 +425,7 @@ constant returns to scale. Constant returns and internal adjustment costs, along with some costs of transferring capital between firms, yield an optimum rate of firm growth -**independent of the firm's size** --- this is Gibrat's Law. +*independent of the firm's size* --- this is Gibrat's Law. The bounded, downward-sloping, inverse industry demand function is @@ -469,7 +437,7 @@ where $Q_t$ is the sum of output over all firms and $u_t$ is a demand shock subject to a stationary Markov process. Prescott and Visscher show that a competitive equilibrium exists using the -framework of {cite}`Lucas_Prescott_1971`. +framework of {cite:t}`Lucas_Prescott_1971`. The discounted consumer surplus to be maximized is @@ -481,76 +449,58 @@ where $A_{i2t}, A_{i3t}$, and $B$ are obtained by summing $a_{i2t}$, $a_{i3t}$, respectively, over all firms in the industry. -### Key Property: Growth Rates Independent of Size +### Key property: growth rates independent of size If two firms have organization capital vectors $\underline{k}$ that are proportional at a point in time, they will be proportional in all future periods. -That is, **growth rates are independent of firm size**. +That is, *growth rates are independent of firm size*. 
```{code-cell} ipython3 -def simulate_firm_growth(T, gamma, rho, q0, seed=42): +--- +mystnb: + figure: + caption: Firm output levels and growth rates + name: fig-firm-growth +--- +def simulate_firm_growth(T, γ, ρ, q0, seed=42): """ - Simulate firm output growth with constant growth rate - and stochastic quit turnover. - - Parameters - ---------- - T : int - Number of periods - gamma : float - Equilibrium growth rate - rho : float - Quit rate - q0 : float - Initial output - seed : int - Random seed - - Returns - ------- - output : array - Firm output path + Simulate firm output growth with stochastic shocks. """ rng = np.random.default_rng(seed) output = np.zeros(T) output[0] = q0 for t in range(1, T): - # Stochastic growth around equilibrium rate shock = rng.normal(0, 0.02) - output[t] = output[t-1] * (1 + gamma + shock) + output[t] = output[t-1] * (1 + γ + shock) return output T = 50 -gamma_eq = 0.05 # equilibrium growth rate -rho = 0.1 +γ_eq = 0.05 +ρ = 0.1 -# Simulate firms of different initial sizes fig, axes = plt.subplots(1, 2, figsize=(14, 5)) -# Level plots ax = axes[0] for q0, label in [(10, 'Small firm'), (50, 'Medium firm'), (200, 'Large firm')]: - output = simulate_firm_growth(T, gamma_eq, rho, q0, + output = simulate_firm_growth(T, γ_eq, ρ, q0, seed=int(q0)) - ax.plot(range(T), output, label=f'{label} ($q_0={q0}$)') -ax.set_xlabel('Period') -ax.set_ylabel('Output $q_t$') -ax.set_title('Firm Output Levels') + ax.plot(range(T), output, lw=2, label=f'{label} ($q_0={q0}$)') +ax.set_xlabel('period') +ax.set_ylabel('output $q_t$') ax.legend() -# Log plots (growth rates) ax = axes[1] for q0, label in [(10, 'Small firm'), (50, 'Medium firm'), (200, 'Large firm')]: - output = simulate_firm_growth(T, gamma_eq, rho, q0, + output = simulate_firm_growth(T, γ_eq, ρ, q0, seed=int(q0)) - ax.plot(range(T), np.log(output), label=f'{label} ($q_0={q0}$)') -ax.set_xlabel('Period') + ax.plot(range(T), np.log(output), lw=2, + label=f'{label} ($q_0={q0}$)') 
+ax.set_xlabel('period') ax.set_ylabel(r'$\log(q_t)$') -ax.set_title('Log Output (Parallel = Equal Growth Rates)') ax.legend() plt.tight_layout() @@ -562,7 +512,7 @@ the log output paths are parallel. This is **Gibrat's Law**: growth rates are independent of firm size. -## Bayesian Screening Simulation +## Bayesian screening simulation ```{index} single: Organization Capital; Bayesian Screening ``` @@ -573,50 +523,29 @@ We draw workers from the population, observe their signals in the screening task and then assign them to the appropriate productive task based on the posterior mean. ```{code-cell} ipython3 -def simulate_screening(n_workers, n_screen, pi, seed=123): +--- +mystnb: + figure: + caption: Screening and worker assignment accuracy + name: fig-screening-assignment +--- +def simulate_screening(n_workers, n_screen, π, seed=123): """ - Simulate the screening and assignment of workers. - - Parameters - ---------- - n_workers : int - Number of workers to screen - n_screen : int - Number of screening periods per worker - pi : float - Prior precision of θ distribution - seed : int - Random seed - - Returns - ------- - results : dict - Dictionary with θ values, posterior means, - assignments, and misassignment rate + Simulate screening and assignment of workers. 
""" rng = np.random.default_rng(seed) - # Draw true worker types - theta = rng.normal(0, 1/np.sqrt(pi), n_workers) - - # Generate screening signals - signals = (theta[:, None] + θ = rng.normal(0, 1/np.sqrt(π), n_workers) + signals = (θ[:, None] + rng.normal(0, 1, (n_workers, n_screen))) + posterior_means = signals.sum(axis=1) / (π + n_screen) - # Compute posterior means after screening - posterior_means = signals.sum(axis=1) / (pi + n_screen) - - # Assign workers: m > 0 → task 2, m ≤ 0 → task 3 assignment = np.where(posterior_means > 0, 2, 3) - - # Correct assignment based on true θ - correct_assignment = np.where(theta > 0, 2, 3) - - # Misassignment rate + correct_assignment = np.where(θ > 0, 2, 3) misassignment_rate = np.mean(assignment != correct_assignment) return { - 'theta': theta, + 'theta': θ, 'posterior_means': posterior_means, 'assignment': assignment, 'correct_assignment': correct_assignment, @@ -624,7 +553,7 @@ def simulate_screening(n_workers, n_screen, pi, seed=123): } -pi = 1.0 +π = 1.0 n_workers = 5000 screening_periods = [1, 3, 5, 10, 20, 50] @@ -634,25 +563,23 @@ axes = axes.flatten() misassignment_rates = [] for idx, n_screen in enumerate(screening_periods): - results = simulate_screening(n_workers, n_screen, pi) + results = simulate_screening(n_workers, n_screen, π) misassignment_rates.append(results['misassignment_rate']) ax = axes[idx] - theta = results['theta'] + θ = results['theta'] m = results['posterior_means'] - # Color by whether assignment matches true type correct = results['assignment'] == results['correct_assignment'] - ax.scatter(theta[correct], m[correct], alpha=0.1, s=5, + ax.scatter(θ[correct], m[correct], alpha=0.1, s=5, color='blue', label='Correct') - ax.scatter(theta[~correct], m[~correct], alpha=0.3, s=5, + ax.scatter(θ[~correct], m[~correct], alpha=0.3, s=5, color='red', label='Misassigned') ax.axhline(0, color='k', linewidth=0.5) ax.axvline(0, color='k', linewidth=0.5) mis = results['misassignment_rate'] - 
ax.set_title(f'$n = {n_screen}$, misassign = {mis:.1%}') - ax.set_xlabel(r'True $\theta$') - ax.set_ylabel('Posterior mean $m$') + ax.set_xlabel(r'true $\theta$') + ax.set_ylabel('posterior mean $m$') if idx == 0: ax.legend(markerscale=5, loc='upper left') @@ -660,7 +587,7 @@ plt.tight_layout() plt.show() ``` -Red dots are workers who are **misassigned** --- placed in the wrong productive task +Red dots are workers who are *misassigned* --- placed in the wrong productive task because the posterior mean had the wrong sign relative to their true $\theta$. As $n$ increases: @@ -668,26 +595,31 @@ As $n$ increases: * Misassignment rates fall ```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Misassignment rate by screening time + name: fig-misassignment-rate +--- fig, ax = plt.subplots(figsize=(10, 6)) n_range = np.arange(1, 51) mis_rates = [] for n_screen in n_range: - results = simulate_screening(n_workers, n_screen, pi) + results = simulate_screening(n_workers, n_screen, π) mis_rates.append(results['misassignment_rate']) -ax.plot(n_range, mis_rates, 'b-o', markersize=3) -ax.set_xlabel('Screening periods $n$') -ax.set_ylabel('Misassignment rate') -ax.set_title('Misassignment Rate Decreases with Screening Time') +ax.plot(n_range, mis_rates, '-o', markersize=3, lw=2) +ax.set_xlabel('screening periods $n$') +ax.set_ylabel('misassignment rate') plt.tight_layout() plt.show() ``` This confirms the theoretical prediction: the cost savings from better assignment -exhibit **diminishing returns** in the screening time $n$. +exhibit *diminishing returns* in the screening time $n$. -## Team Information +## Team information ```{index} single: Organization Capital; Team Information ``` @@ -695,7 +627,7 @@ exhibit **diminishing returns** in the screening time $n$. Personnel information need not be valuable only because it facilitates the matching of workers to tasks. -Another equally valuable use of personnel information is in the **matching of workers to workers**. 
+Another equally valuable use of personnel information is in the *matching of workers to workers*. What is important to performance in many activities within the firm is not just the aptitude of an individual assigned to a task, but also how well the @@ -726,47 +658,31 @@ $$ and precision $h = \pi + n$. If dissolution of a team also dissolves the accrued information, the team information -model has the **same mathematical structure** as the personnel information model. +model has the *same mathematical structure* as the personnel information model. ```{code-cell} ipython3 -def simulate_team_screening(n_teams, n_screen, pi, mu=0.5, +--- +mystnb: + figure: + caption: Team quality estimates by screening periods + name: fig-team-screening +--- +def simulate_team_screening(n_teams, n_screen, π, μ=0.5, seed=456): """ Simulate team screening with Bayesian updating. - - Parameters - ---------- - n_teams : int - Number of teams to screen - n_screen : int - Number of screening periods - pi : float - Prior precision - mu : float - Prior mean of team quality - seed : int - Random seed - - Returns - ------- - results : dict """ rng = np.random.default_rng(seed) - # True team qualities - theta = rng.normal(mu, 1/np.sqrt(pi), n_teams) - - # Generate signals - signals = (theta[:, None] + θ = rng.normal(μ, 1/np.sqrt(π), n_teams) + signals = (θ[:, None] + rng.normal(0, 1, (n_teams, n_screen))) - - # Posterior means z_bar = signals.mean(axis=1) - post_means = mu + n_screen * (z_bar - mu) / (pi + n_screen) - post_prec = pi + n_screen + post_means = μ + n_screen * (z_bar - μ) / (π + n_screen) + post_prec = π + n_screen return { - 'theta': theta, + 'theta': θ, 'posterior_means': post_means, 'posterior_precision': post_prec } @@ -775,16 +691,15 @@ def simulate_team_screening(n_teams, n_screen, pi, mu=0.5, fig, axes = plt.subplots(1, 3, figsize=(15, 5)) for idx, n_screen in enumerate([1, 5, 20]): - results = simulate_team_screening(500, n_screen, pi=1.0, mu=0.5) + results = 
simulate_team_screening(500, n_screen, π=1.0, μ=0.5) ax = axes[idx] ax.scatter(results['theta'], results['posterior_means'], alpha=0.4, s=10) lims = [-1.5, 2.5] - ax.plot(lims, lims, 'r--', alpha=0.5, label='45° line') - ax.set_xlabel(r'True team quality $\theta$') - ax.set_ylabel('Posterior mean $m$') - ax.set_title(f'$n = {n_screen}$ screening periods') + ax.plot(lims, lims, 'r--', alpha=0.5, lw=2, label='45° line') + ax.set_xlabel(r'true team quality $\theta$') + ax.set_ylabel('posterior mean $m$') ax.set_xlim(lims) ax.set_ylim(lims) ax.legend() @@ -796,11 +711,12 @@ plt.show() As with individual screening, more observations improve the precision of team quality estimates. + Rapid growth forces fewer observations before team assignments must be finalized, leading to higher costs. -## Firm-Specific Human Capital +## Firm-specific human capital ```{index} single: Organization Capital; Human Capital ``` @@ -812,9 +728,10 @@ determined largely by the level and meshing of the skills of the employees. ```{note} The case for the human capital of employees being part of the capital stock of the firm -is well established (see {cite}`Becker_1975`). Productivity in the future depends on levels -of human capital in the future, but to acquire human capital for the future, a sacrifice -in real resources is required in the present. +is well established (see {cite:t}`Becker_1975`). + +Productivity in the future depends on levels of human capital in the future, but to acquire +human capital for the future, a sacrifice in real resources is required in the present. ``` The key features are: @@ -827,47 +744,39 @@ The key features are: of the work force is improved * The transformation frontier between current output and future human capital is - **concave** and linearly homogeneous + *concave* and linearly homogeneous This gives the technology set the structure of a closed convex cone with a vertex at the origin --- sufficient for optimal proportional growth by firms. 
-### Concave Transformation Frontier +### Concave transformation frontier ```{code-cell} ipython3 -def transformation_frontier(q, alpha=0.7): +--- +mystnb: + figure: + caption: Concave transformation frontier + name: fig-transformation-frontier +--- +def transformation_frontier(q, α=0.7): """ - Concave transformation frontier between current output - and future human capital increment. - - Parameters - ---------- - q : array_like - Current output (fraction of capacity) - alpha : float - Concavity parameter - - Returns - ------- - hk : array - Future human capital increment + Concave transformation frontier between output and human capital. """ q = np.asarray(q, dtype=float) - return (1 - q**alpha)**(1/alpha) + return (1 - q**α)**(1/α) fig, ax = plt.subplots(figsize=(8, 8)) q_vals = np.linspace(0, 1, 200) -for alpha in [0.5, 0.7, 1.0, 1.5]: - hk = transformation_frontier(q_vals, alpha) +for α in [0.5, 0.7, 1.0, 1.5]: + hk = transformation_frontier(q_vals, α) ax.plot(q_vals, hk, - label=fr'$\alpha = {alpha}$', linewidth=2) + label=fr'$\alpha = {α}$', lw=2) -ax.set_xlabel('Current output $q$ (fraction of capacity)') -ax.set_ylabel('Future human capital increment $\\Delta h$') -ax.set_title('Concave Transformation Frontier') +ax.set_xlabel('current output $q$ (fraction of capacity)') +ax.set_ylabel('future human capital increment $\\Delta h$') ax.legend() ax.set_xlim(0, 1.05) ax.set_ylim(0, 1.05) @@ -884,7 +793,7 @@ But a workday consisting primarily of learning also has diminishing returns, creating the cost of rapid adjustment. -## Costs of Transferring Organization Capital +## Costs of transferring organization capital ```{index} single: Organization Capital; Transfer Costs ``` @@ -895,12 +804,12 @@ the model would not place constraints on the firm's growth rate. Firms could then merge, divest, or pirate each other's personnel without a cost penalty and thus produce a pattern of growth not restricted by the model. 
-Organization capital is **not** costlessly moved, however: +Organization capital is *not* costlessly moved, however: -1. **Moving is disruptive**: relocating from one locale to another is disruptive to both +1. *Moving is disruptive*: relocating from one locale to another is disruptive to both employee and family -2. **Information is firm-specific**: the information set that makes a person productive +2. *Information is firm-specific*: the information set that makes a person productive in one organization may not make that person as productive in another, even if both firms produce identical output @@ -908,62 +817,45 @@ Organization capital is **not** costlessly moved, however: * Knowing whom to ask when problems arise * Rapport with buyers or sellers -These are types of organization capital in one firm that **cannot be transferred costlessly** +These are types of organization capital in one firm that *cannot be transferred costlessly* to another. -## Summary and Implications +## Summary and implications The Prescott-Visscher model provides a unified framework in which: * The firm exists as an entity because it is an efficient structure for accumulating, storing, and using information -* **Constant returns to scale** arise because once the best combinations of worker types +* *Constant returns to scale* arise because once the best combinations of worker types are discovered, nothing prevents the firm from replicating those combinations with proportional gains in product -* **Increasing adjustment costs** arise endogenously from the trade-off between +* *Increasing adjustment costs* arise endogenously from the trade-off between current production and investment in organization capital -* **Gibrat's Law** --- growth rates independent of firm size --- is a natural implication +* *Gibrat's Law* --- growth rates independent of firm size --- is a natural implication -* Large firms should have growth rates that display **less variance** than small firms +* Large firms 
should have growth rates that display *less variance* than small firms because large firms are essentially portfolios of smaller production units ```{code-cell} ipython3 -# Illustrate the variance reduction in growth rates for large vs small firms -def simulate_growth_rate_distribution(n_firms, n_subunits, gamma, - sigma, T=100, seed=789): +--- +mystnb: + figure: + caption: Growth rate distributions by firm size + name: fig-growth-rate-dist +--- +def simulate_growth_rate_distribution(n_firms, n_subunits, γ, + σ, T=100, seed=789): """ Simulate growth rate distributions for firms of different sizes. - - Parameters - ---------- - n_firms : int - Number of firms to simulate - n_subunits : int - Number of independent subunits per firm - gamma : float - Mean growth rate - sigma : float - Std dev of growth rate per subunit - T : int - Number of periods - seed : int - Random seed - - Returns - ------- - growth_rates : array - Realized growth rates for each firm """ rng = np.random.default_rng(seed) - # Each firm's growth is average of n_subunit growth rates - subunit_growth = rng.normal(gamma, sigma, + subunit_growth = rng.normal(γ, σ, (n_firms, n_subunits, T)) - firm_growth = subunit_growth.mean(axis=1) # average across subunits - # Return time-averaged growth rate for each firm + firm_growth = subunit_growth.mean(axis=1) return firm_growth.mean(axis=1) @@ -973,33 +865,32 @@ sizes = {'Small (1 unit)': 1, 'Medium (5 units)': 5, 'Large (20 units)': 20} -gamma = 0.05 -sigma = 0.10 +γ = 0.05 +σ = 0.10 for label, n_sub in sizes.items(): rates = simulate_growth_rate_distribution( - 2000, n_sub, gamma, sigma) + 2000, n_sub, γ, σ) ax.hist(rates, bins=50, alpha=0.5, density=True, label=f'{label}: std={rates.std():.4f}') -ax.set_xlabel('Average growth rate') -ax.set_ylabel('Density') -ax.set_title('Growth Rate Distributions by Firm Size') +ax.set_xlabel('average growth rate') +ax.set_ylabel('density') ax.legend() -ax.axvline(gamma, color='k', linestyle='--', +ax.axvline(γ, 
color='k', linestyle='--', label=r'$\gamma$', alpha=0.5) plt.tight_layout() plt.show() ``` -The figure shows that although all firms have the **same mean growth rate** (Gibrat's Law), -large firms display **less variance** in realized growth rates because they are effectively +The figure shows that although all firms have the *same mean growth rate* (Gibrat's Law), +large firms display *less variance* in realized growth rates because they are effectively portfolios of independent subunits. -This is consistent with the empirical findings of {cite}`Mansfield_1962` and {cite}`Hymer_Pashigian_1962`. +This is consistent with the empirical findings of {cite:t}`Mansfield_1962` and {cite:t}`Hymer_Pashigian_1962`. The essence of the Prescott-Visscher theory is that the nature of the firm is tied to -**organization capital**. +*organization capital*. What distinguishes the firm from other relationships is that it is a structure within which agents have the incentive to acquire and reveal information in a manner that is less diff --git a/lectures/theil_2.md b/lectures/theil_2.md index cd844df39..3132c3860 100644 --- a/lectures/theil_2.md +++ b/lectures/theil_2.md @@ -642,7 +642,7 @@ mystnb: caption: Standard vs robust consumption paths name: fig-std-vs-robust-paths --- -np.random.seed(42) +np.random.seed(0) T_sim = 100 def simulate_ar1(φ, ν, shocks, mu0=0.0): From 45f32a399157d9cd13ab78a3ad9211dea1e0b352 Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Sat, 4 Apr 2026 20:56:17 +1100 Subject: [PATCH 17/20] updates --- environment.yml | 4 +- lectures/_static/quant-econ.bib | 90 --------------------------------- 2 files changed, 2 insertions(+), 92 deletions(-) diff --git a/environment.yml b/environment.yml index 51df47972..dfd3a27ba 100644 --- a/environment.yml +++ b/environment.yml @@ -2,8 +2,8 @@ name: quantecon channels: - default dependencies: - - python=3.12 - - anaconda=2024.10 + - python=3.13 + - anaconda=2025.12 - pip - pip: - jupyter-book>=1.0.4post1,<2.0 diff --git 
a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index 3a8a1e6ac..f0b24b9b8 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -216,21 +216,6 @@ @article{Negishi1960 } -### -QuantEcon Bibliography File used in conjuction with sphinxcontrib-bibtex package -Note: Extended Information (like abstracts, doi, url's etc.) can be found in quant-econ-extendedinfo.bib file in _static/ -### -%% References in the economics literature citing the -%% Blackwell--Dubins theorem on merging of opinions. -%% -%% Blackwell, D. and Dubins, L. (1962), -%% ``Merging of Opinions with Increasing Information,'' -%% Annals of Mathematical Statistics 33(3): 882--886. - -%% ------------------------------------------------------- -%% 1. The original paper -%% ------------------------------------------------------- - @article{BlackwellDubins1962, author = {Blackwell, David and Dubins, Lester}, title = {Merging of Opinions with Increasing Information}, @@ -242,11 +227,6 @@ @article{BlackwellDubins1962 doi = {10.1214/aoms/1177704456} } -%% ------------------------------------------------------- -%% 2. Sandroni (1998/2000): the paper you recalled --- -%% casts doubt on the economic relevance of absolute -%% continuity and of the Blackwell--Dubins result. -%% ------------------------------------------------------- @article{Sandroni2000, author = {Sandroni, Alvaro}, @@ -261,13 +241,6 @@ @article{Sandroni2000 doi = {10.1006/game.1999.0752} } -%% ------------------------------------------------------- -%% 3. Jackson, Kalai, and Smorodinsky (1999): the -%% Matthew Jackson paper you recalled. Uses -%% Blackwell--Dubins to study de~Finetti-style -%% representation of stochastic processes under -%% Bayesian learning. -%% ------------------------------------------------------- @article{JacksonKalaiSmorodinsky1999, author = {Jackson, Matthew O. 
and Kalai, Ehud and @@ -282,11 +255,6 @@ @article{JacksonKalaiSmorodinsky1999 doi = {10.1111/1468-0262.00053} } -%% ------------------------------------------------------- -%% 4. Kalai and Lehrer (1993a): ``Rational Learning -%% Leads to Nash Equilibrium.'' The seminal -%% application of Blackwell--Dubins to game theory. -%% ------------------------------------------------------- @article{KalaiLehrer1993Nash, author = {Kalai, Ehud and Lehrer, Ehud}, @@ -299,9 +267,6 @@ @article{KalaiLehrer1993Nash doi = {10.2307/2951492} } -%% ------------------------------------------------------- -%% 5. Kalai and Lehrer (1993b): Subjective equilibrium. -%% ------------------------------------------------------- @article{KalaiLehrer1993Subjective, author = {Kalai, Ehud and Lehrer, Ehud}, @@ -314,12 +279,6 @@ @article{KalaiLehrer1993Subjective doi = {10.2307/2951506} } -%% ------------------------------------------------------- -%% 6. Kalai and Lehrer (1994): Weak and strong merging. -%% Extends Blackwell--Dubins to weaker notions of -%% convergence; motivated by equilibrium convergence -%% in repeated games and dynamic economies. -%% ------------------------------------------------------- @article{KalaiLehrer1994Merging, author = {Kalai, Ehud and Lehrer, Ehud}, @@ -332,10 +291,6 @@ @article{KalaiLehrer1994Merging doi = {10.1016/0304-4068(94)90037-X} } -%% ------------------------------------------------------- -%% 7. Kalai, Lehrer, and Smorodinsky (1999): Calibrated -%% forecasting and merging. -%% ------------------------------------------------------- @article{KalaiLehrerSmorodinsky1999, author = {Kalai, Ehud and Lehrer, Ehud and Smorodinsky, Rann}, @@ -348,10 +303,6 @@ @article{KalaiLehrerSmorodinsky1999 doi = {10.1006/game.1999.0718} } -%% ------------------------------------------------------- -%% 8. Sandroni (1998): Almost absolute continuity and -%% convergence to Nash equilibrium. 
-%% ------------------------------------------------------- @article{Sandroni1998Nash, author = {Sandroni, Alvaro}, @@ -366,13 +317,6 @@ @article{Sandroni1998Nash doi = {10.1006/game.1997.0580} } -%% ------------------------------------------------------- -%% 9. Pomatto, Al-Najjar, and Sandroni (2014): Merging -%% and testing opinions. Extends Blackwell--Dubins -%% to finitely additive probabilities and studies -%% the connection between merging and the -%% manipulation of statistical tests. -%% ------------------------------------------------------- @article{PomattoAlNajjarSandroni2014, author = {Pomatto, Luciano and Al-Najjar, Nabil I. and @@ -386,10 +330,6 @@ @article{PomattoAlNajjarSandroni2014 doi = {10.1214/14-AOS1212} } -%% ------------------------------------------------------- -%% 10. Lehrer and Smorodinsky (1996a): Compatible measures -%% and merging. -%% ------------------------------------------------------- @article{LehrerSmorodinsky1996Compatible, author = {Lehrer, Ehud and Smorodinsky, Rann}, @@ -402,11 +342,6 @@ @article{LehrerSmorodinsky1996Compatible doi = {10.1287/moor.21.3.697} } -%% ------------------------------------------------------- -%% 11. Lehrer and Smorodinsky (1996b): Merging and -%% learning. In a volume on statistics, probability, -%% and game theory. -%% ------------------------------------------------------- @incollection{LehrerSmorodinsky1996Learning, author = {Lehrer, Ehud and Smorodinsky, Rann}, @@ -423,11 +358,6 @@ @incollection{LehrerSmorodinsky1996Learning year = {1996} } -%% ------------------------------------------------------- -%% 12. Nyarko (1994): Bayesian learning leads to -%% correlated equilibria. Uses Blackwell--Dubins -%% to prove convergence in normal-form games. -%% ------------------------------------------------------- @article{Nyarko1994, author = {Nyarko, Yaw}, @@ -441,12 +371,6 @@ @article{Nyarko1994 doi = {10.1007/BF01213814} } -%% ------------------------------------------------------- -%% 13. 
Jackson and Kalai (1999): Reputation vs. social -%% learning in recurring games; applies -%% Blackwell--Dubins and the rational-learning -%% literature. -%% ------------------------------------------------------- @article{JacksonKalai1999, author = {Jackson, Matthew O. and Kalai, Ehud}, @@ -459,13 +383,6 @@ @article{JacksonKalai1999 doi = {10.1006/jeth.1999.2542} } -%% ------------------------------------------------------- -%% 14. Acemoglu, Chernozhukov, and Yildiz (2016): -%% Learning and disagreement in an uncertain world. -%% Shows that Blackwell--Dubins-style agreement -%% can fail when agents are uncertain about signal -%% informativeness. -%% ------------------------------------------------------- @article{AcemogluChernozhukovYildiz2016, author = {Acemoglu, Daron and Chernozhukov, Victor and @@ -480,13 +397,6 @@ @article{AcemogluChernozhukovYildiz2016 doi = {10.3982/TE1156} } -%% ------------------------------------------------------- -%% 15. Diaconis and Freedman (1986): On the consistency -%% of Bayes estimates. A key probability paper -%% in the same intellectual tradition, often cited -%% alongside Blackwell--Dubins in the economics -%% learning literature. 
-%% ------------------------------------------------------- @article{DiaconisFreedman1986, author = {Diaconis, Persi and Freedman, David}, From 3ffc9270f096036291c5c8f6bca8a2bdf5cbe36c Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Sat, 4 Apr 2026 21:05:18 +1100 Subject: [PATCH 18/20] updates --- lectures/_static/quant-econ.bib | 179 ------------------------------ lectures/affine_risk_prices.md | 2 +- lectures/merging_of_opinions.md | 2 +- lectures/rational_expectations.md | 6 +- lectures/theil_2.md | 2 +- 5 files changed, 6 insertions(+), 185 deletions(-) diff --git a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index f0b24b9b8..967c3cc47 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -118,16 +118,6 @@ @article{Kogan_etal2017 year = {2017} } -@article{Harrison_Kreps1979, - author = {Harrison, J. Michael and Kreps, David M.}, - title = {Martingales and Arbitrage in Multiperiod Securities Markets}, - journal = {Journal of Economic Theory}, - volume = {20}, - number = {3}, - pages = {381--408}, - year = {1979} -} - @article{Kreps_Porteus1978, author = {Kreps, David M. and Porteus, Evan L.}, title = {Temporal Resolution of Uncertainty and Dynamic Choice Theory}, @@ -155,16 +145,6 @@ @book{Karlin_Taylor1981 year = {1981} } -@article{Bansal_Yaron2004, - author = {Bansal, Ravi and Yaron, Amir}, - title = {Risks for the Long Run: A Potential Resolution of Asset Pricing Puzzles}, - journal = {Journal of Finance}, - volume = {59}, - number = {4}, - pages = {1481--1509}, - year = {2004} -} - @article{Brunnermeier_etal2014, author = {Brunnermeier, Markus K. 
and Simsek, Alp and Xiong, Wei}, title = {A Welfare Criterion for Models with Distorted Beliefs}, @@ -215,19 +195,6 @@ @article{Negishi1960 year = {1960} } - -@article{BlackwellDubins1962, - author = {Blackwell, David and Dubins, Lester}, - title = {Merging of Opinions with Increasing Information}, - journal = {Annals of Mathematical Statistics}, - year = {1962}, - volume = {33}, - number = {3}, - pages = {882--886}, - doi = {10.1214/aoms/1177704456} -} - - @article{Sandroni2000, author = {Sandroni, Alvaro}, title = {The Role of Absolute Continuity in @@ -241,7 +208,6 @@ @article{Sandroni2000 doi = {10.1006/game.1999.0752} } - @article{JacksonKalaiSmorodinsky1999, author = {Jackson, Matthew O. and Kalai, Ehud and Smorodinsky, Rann}, @@ -255,7 +221,6 @@ @article{JacksonKalaiSmorodinsky1999 doi = {10.1111/1468-0262.00053} } - @article{KalaiLehrer1993Nash, author = {Kalai, Ehud and Lehrer, Ehud}, title = {Rational Learning Leads to {Nash} Equilibrium}, @@ -267,7 +232,6 @@ @article{KalaiLehrer1993Nash doi = {10.2307/2951492} } - @article{KalaiLehrer1993Subjective, author = {Kalai, Ehud and Lehrer, Ehud}, title = {Subjective Equilibrium in Repeated Games}, @@ -279,7 +243,6 @@ @article{KalaiLehrer1993Subjective doi = {10.2307/2951506} } - @article{KalaiLehrer1994Merging, author = {Kalai, Ehud and Lehrer, Ehud}, title = {Weak and Strong Merging of Opinions}, @@ -291,7 +254,6 @@ @article{KalaiLehrer1994Merging doi = {10.1016/0304-4068(94)90037-X} } - @article{KalaiLehrerSmorodinsky1999, author = {Kalai, Ehud and Lehrer, Ehud and Smorodinsky, Rann}, title = {Calibrated Forecasting and Merging}, @@ -303,7 +265,6 @@ @article{KalaiLehrerSmorodinsky1999 doi = {10.1006/game.1999.0718} } - @article{Sandroni1998Nash, author = {Sandroni, Alvaro}, title = {Necessary and Sufficient Conditions for @@ -317,7 +278,6 @@ @article{Sandroni1998Nash doi = {10.1006/game.1997.0580} } - @article{PomattoAlNajjarSandroni2014, author = {Pomatto, Luciano and Al-Najjar, Nabil I. 
and Sandroni, Alvaro}, @@ -330,7 +290,6 @@ @article{PomattoAlNajjarSandroni2014 doi = {10.1214/14-AOS1212} } - @article{LehrerSmorodinsky1996Compatible, author = {Lehrer, Ehud and Smorodinsky, Rann}, title = {Compatible Measures and Merging}, @@ -342,7 +301,6 @@ @article{LehrerSmorodinsky1996Compatible doi = {10.1287/moor.21.3.697} } - @incollection{LehrerSmorodinsky1996Learning, author = {Lehrer, Ehud and Smorodinsky, Rann}, title = {Merging and Learning}, @@ -358,7 +316,6 @@ @incollection{LehrerSmorodinsky1996Learning year = {1996} } - @article{Nyarko1994, author = {Nyarko, Yaw}, title = {Bayesian Learning Leads to Correlated Equilibria @@ -371,7 +328,6 @@ @article{Nyarko1994 doi = {10.1007/BF01213814} } - @article{JacksonKalai1999, author = {Jackson, Matthew O. and Kalai, Ehud}, title = {Reputation versus Social Learning}, @@ -383,7 +339,6 @@ @article{JacksonKalai1999 doi = {10.1006/jeth.1999.2542} } - @article{AcemogluChernozhukovYildiz2016, author = {Acemoglu, Daron and Chernozhukov, Victor and Yildiz, Muhamet}, @@ -397,7 +352,6 @@ @article{AcemogluChernozhukovYildiz2016 doi = {10.3982/TE1156} } - @article{DiaconisFreedman1986, author = {Diaconis, Persi and Freedman, David}, title = {On the Consistency of {Bayes} Estimates}, @@ -497,8 +451,6 @@ @article{Hymer_Pashigian_1962 year = {1962} } - - @article{blackwell1962, author = {Blackwell, David and Dubins, Lester E.}, title = {Merging of Opinions with Increasing Information}, @@ -558,17 +510,6 @@ @article{novikov1972 pages = {717--720}, } -@article{diaconis1986, - author = {Diaconis, Persi and Freedman, David}, - title = {On the Consistency of {Bayes} Estimates}, - journal = {Annals of Statistics}, - year = {1986}, - volume = {14}, - number = {1}, - pages = {1--26}, -} - - @inproceedings{blackwell1951, author = {Blackwell, David}, title = {Comparison of Experiments}, @@ -645,7 +586,6 @@ @inproceedings{hansen2004certainty year={2004} } - @article{evans2005interview, title={An interview with Thomas J. 
Sargent}, author={Evans, George W and Honkapohja, Seppo}, @@ -811,7 +751,6 @@ @article{alchian1950uncertainty publisher={The University of Chicago Press} } - @article{blume2006if, title={If you're so smart, why aren't you rich? Belief selection in complete and incomplete markets}, author={Blume, Lawrence and Easley, David}, @@ -832,7 +771,6 @@ @article{mendoza1998international publisher={JSTOR} } - @book{intriligator2002mathematical, title={Mathematical optimization and economic theory}, author={Intriligator, Michael D}, @@ -881,7 +819,6 @@ @article{Orcutt_Winokur_69 year = {1969} } - @incollection{Hurwicz:1962, address = {Stanford, CA}, author = {Hurwicz, Leonid}, @@ -894,7 +831,6 @@ @incollection{Hurwicz:1962 year = {1962} } - @article{Hurwicz:1966, abstract = {Publisher Summary This chapter concentrates on the structural form of interdependent systems. A great deal of effort is devoted in econometrics and elsewhere to find the behavior pattern of an observed configuration. Such effort is justified on the grounds that the knowledge of the behavior pattern is needed for the purpose of giving explanation or prediction. The merits of this justification are also examined in the chapter. At this point, the chapter considers certain difficulties encountered in the process of looking for the behavior patterns. In certain fields, notably economics (but also— for example, electronic network theory), it deals with a set (configuration) of objects (components) that are interdependent in their behavior. For purposes of both theoretical analysis and empirical investigation of such situations, the phenomena are often described in the chapter (in idealized form) by means of a system of simultaneous equations. History alone is not enabled to determine the behavior pattern of the configuration; but this does not mean that the task is hopeless. 
The priori information is obtained from the axiom systems or theories that are believed to be relevant to the behavior pattern of the configuration.}, author = {Leonid Hurwicz}, @@ -909,7 +845,6 @@ @article{Hurwicz:1966 year = {1966}, } - @article{hurwicz1950least, title = {Least squares bias in time series}, author = {Hurwicz, Leonid}, @@ -947,7 +882,6 @@ @article{warner1965randomized publisher = {Taylor \& Francis} } - @article{ljungqvist1993unified, title = {A unified approach to measures of privacy in randomized response models: A utilitarian perspective}, author = {Ljungqvist, Lars}, @@ -968,7 +902,6 @@ @article{lanke1976degree publisher = {JSTOR} } - @article{leysieffer1976respondent, title = {Respondent jeopardy and optimal designs in randomized response models}, author = {Leysieffer, Frederick W and Warner, Stanley L}, @@ -980,7 +913,6 @@ @article{leysieffer1976respondent publisher = {Taylor \& Francis} } - @article{anderson1976estimation, title = {Estimation of a proportion through randomized response}, author = {Anderson, Harald}, @@ -1012,7 +944,6 @@ @article{greenberg1977respondent publisher = {Elsevier} } - @article{greenberg1969unrelated, title = {The unrelated question randomized response model: Theoretical framework}, author = {Greenberg, Bernard G and Abul-Ela, Abdel-Latif A and Simmons, Walt R and Horvitz, Daniel G}, @@ -1035,8 +966,6 @@ @article{lanke1975choice publisher = {Taylor \& Francis} } - - @article{schmid2010, title = {Dynamic mode decomposition of numerical and experimental data}, author = {Schmid, Peter J}, @@ -1047,7 +976,6 @@ @article{schmid2010 publisher = {Cambridge University Press} } - @article{apostolakis1990, title = {The concept of probability in safety assessments of technological systems}, author = {Apostolakis, George}, @@ -1059,7 +987,6 @@ @article{apostolakis1990 publisher = {American Association for the Advancement of Science} } - @unpublished{Greenfield_Sargent_1993, author = {Moses A Greenfield and Thomas J Sargent}, 
title = {A Probabilistic Analysis of a Catastrophic Transuranic Waste Hoist Accident at the WIPP}, @@ -1079,8 +1006,6 @@ @article{Ardron_2018 year = {2018} } - - @article{Groves_73, author = {Groves, T.}, year = {1973}, @@ -1108,9 +1033,6 @@ @article{Vickrey_61 pages = {8-37} } - - - @article{Phelan_Townsend_91, author = {Christopher Phelan and Robert M. Townsend}, title = {{Computing Multi-Period, Information-Constrained Optima}}, @@ -1126,7 +1048,6 @@ @article{Phelan_Townsend_91 url = {https://ideas.repec.org/a/oup/restud/v58y1991i5p853-881..html} } - @article{Spear_Srivastava_87, author = {Stephen E. Spear and Sanjay Srivastava}, title = {{On Repeated Moral Hazard with Discounting}}, @@ -1142,7 +1063,6 @@ @article{Spear_Srivastava_87 url = {https://ideas.repec.org/a/oup/restud/v54y1987i4p599-617..html} } - @article{tu_Rowley, title = {On dynamic mode decomposition: Theory and applications}, author = {Tu, J. H. and Rowley, C. W. and Luchtenburg, D. M. and Brunton, S. L. and Kutz, J. N.}, @@ -1153,7 +1073,6 @@ @article{tu_Rowley pages = {391--421} } - @book{Knight:1921, author = {Knight, Frank H.}, date-added = {2020-08-20 10:29:34 -0500}, @@ -1164,7 +1083,6 @@ @book{Knight:1921 year = {1921} } - @article{MaccheroniMarinacciRustichini:2006b, author = {Maccheroni, Fabio and Marinacci, Massimo and Rustichini, Aldo}, date-added = {2021-05-19 08:04:27 -0500}, @@ -1193,7 +1111,6 @@ @article{GilboaSchmeidler:1989 year = {1989} } - @book{Sutton_2018, title={Reinforcement learning: An introduction}, author={Sutton, Richard S and Barto, Andrew G}, @@ -1216,7 +1133,6 @@ @article{AHS_2003 url = {https://ideas.repec.org/a/tpr/jeurec/v1y2003i1p68-123.html} } - @article{BHS_2009, author = {Barillas, Francisco and Hansen, Lars Peter and Sargent, Thomas J.}, title = {{Doubts or variability?}}, @@ -1232,8 +1148,6 @@ @article{BHS_2009 url = {https://ideas.repec.org/a/eee/jetheo/v144y2009i6p2388-2418.html} } - - @article{HST_1999, author = {Lars Peter Hansen and Thomas J.
Sargent and Thomas D. Tallarini}, title = {{Robust Permanent Income and Pricing}}, @@ -1249,7 +1163,6 @@ @article{HST_1999 url = {https://ideas.repec.org/a/oup/restud/v66y1999i4p873-907..html} } - @article{simon1956dynamic, title={Dynamic programming under uncertainty with a quadratic criterion function}, author={Simon, Herbert A}, @@ -1278,9 +1191,6 @@ @article{Jacobson_73 pages = {124-131} } - - - @book{Bucklew_2004, title = {An Introduction to Rare Event Simulation}, author = {James A. Bucklew}, @@ -1289,8 +1199,6 @@ @book{Bucklew_2004 year = {2004} } - - @book{Whittle_1990, author = {Peter Whittle}, title = {Risk-Sensitive Optimal Control}, @@ -1299,7 +1207,6 @@ @book{Whittle_1990 address = {New York} } - @article{Whittle_1981, author = {Peter Whittle}, year = {1981}, @@ -1341,7 +1248,6 @@ @book{DDSE_book address = {New York} } - @book{bertsimas_tsitsiklis1997, author = {Bertsimas, D. & Tsitsiklis, J. N.}, title = {{Introduction to linear optimization}}, @@ -1357,7 +1263,6 @@ @book{hu_guo2018 year = {2018} } - @article{definetti, author = {Bruno de Finetti}, date-added = {2014-12-26 17:45:57 +0000}, @@ -1979,16 +1884,6 @@ @article{JuddYeltekinConklin2003 url = {https://ideas.repec.org/a/ecm/emetrp/v71y2003i4p1239-1254.html} } -@book{kreps, - author = {David M. Kreps}, - date-added = {2014-12-26 17:45:57 +0000}, - date-modified = {2014-12-26 17:45:57 +0000}, - publisher = {Westview Press}, - series = {Underground Classics in Economics}, - title = {Notes on the Theory of Choice}, - year = {1988} -} - @book{Kreps88, title = {Notes on the Theory of Choice}, author = {David M. 
Kreps}, @@ -2086,17 +1981,6 @@ @book{Lucas1987 publisher = {Oxford Blackwell} } -@article{hansen2009long, - title = {Long-term risk: An operator approach}, - author = {Hansen, Lars Peter and Scheinkman, Jos{\'e} A}, - journal = {Econometrica}, - volume = {77}, - number = {1}, - pages = {177--234}, - year = {2009}, - publisher = {Wiley Online Library} -} - @article{Hans_Scheink_2009, author = {Lars Peter Hansen and Jose A. Scheinkman}, title = {Long-Term Risk: An Operator Approach}, @@ -2108,13 +1992,6 @@ @article{Hans_Scheink_2009 month = {01} } -@book{hansen2008robustness, - title = {Robustness}, - author = {Hansen, Lars Peter and Sargent, Thomas J}, - year = {2008}, - publisher = {Princeton university press} -} - @book{Whittle1963, title = {Prediction and regulation by linear least-square methods}, author = {Whittle, Peter}, @@ -2641,7 +2518,6 @@ @article{hopenhayn1992entry publisher = {JSTOR} } - @book{bacsar2008h, title={H-infinity optimal control and related minimax design problems: a dynamic game approach}, author={Ba{\c{s}}ar, Tamer and Bernhard, Pierre}, @@ -2660,7 +2536,6 @@ @article{sargent1981interpreting publisher={The University of Chicago Press} } - @inproceedings{lucas1976econometric, title={Econometric policy evaluation: A critique}, author={Lucas, Robert E Jr}, @@ -2791,14 +2666,6 @@ @article{Lucas1978 year = {1978} } -@article{LucasPrescott1971, - author = {Lucas, Jr., Robert E and Prescott, Edward C}, - journal = {Econometrica: Journal of the Econometric Society}, - pages = {659--681}, - title = {{Investment under uncertainty}}, - year = {1971} -} - @article{LucasStokey1983, author = {Lucas, Jr., Robert E and Stokey, Nancy L}, journal = {Journal of monetary Economics}, @@ -3038,17 +2905,6 @@ @article{Schelling1969 year = {1969} } -@article{bansal2004risks, - title = {Risks for the long run: A potential resolution of asset pricing puzzles}, - author = {Bansal, Ravi and Yaron, Amir}, - journal = {The journal of Finance}, - volume = {59}, - number = 
{4}, - pages = {1481--1509}, - year = {2004}, - publisher = {Wiley Online Library} -} - @article{Bansal_Yaron_2004, author = {Ravi Bansal and Amir Yaron}, title = {{Risks for the Long Run: A Potential Resolution of Asset Pricing Puzzles}}, @@ -3075,31 +2931,6 @@ @article{hansen2008consumption publisher = {The University of Chicago Press} } -@article{HHL_2008, - author = {Lars Peter Hansen and John C. Heaton and Nan Li}, - title = {{Consumption Strikes Back? Measuring Long-Run Risk}}, - journal = {Journal of Political Economy}, - year = 2008, - volume = {116}, - number = {2}, - pages = {260-302}, - month = {04}, - keywords = {}, - doi = {}, - abstract = { We characterize and measure a long-term risk-return trade-off for the valuation of cash flows exposed to fluctuations in macroeconomic growth. This trade-off features risk prices of cash flows that are realized far into the future but continue to be reflected in asset values. We apply this analysis to claims on aggregate cash flows and to cash flows from value and growth portfolios by imputing values to the long-run dynamic responses of cash flows to macroeconomic shocks. We explore the sensitivity of our results to features of the economic valuation model and of the model cash flow dynamics. (c) 2008 by The University of Chicago. 
All rights reserved.}, - url = {https://ideas.repec.org/a/ucp/jpolec/v116y2008i2p260-302.html} -} - -@article{hansen2007beliefs, - title = {Beliefs, doubts and learning: Valuing macroeconomic risk}, - author = {Hansen, Lars Peter}, - journal = {American Economic Review}, - volume = {97}, - number = {2}, - pages = {1--30}, - year = {2007} -} - @article{Hansen_2007, author = {Lars Peter Hansen}, title = {{Beliefs, Doubts and Learning: Valuing Macroeconomic Risk}}, @@ -3115,16 +2946,6 @@ @article{Hansen_2007 url = {https://ideas.repec.org/a/aea/aecrev/v97y2007i2p1-30.html} } -@article{lucas2003macroeconomic, - title = {Macroeconomic priorities}, - author = {Lucas Jr, Robert E}, - journal = {American economic review}, - volume = {93}, - number = {1}, - pages = {1--14}, - year = {2003} -} - @article{Lucas_2003, author = {Lucas, Jr., Robert E}, title = {{Macroeconomic Priorities}}, diff --git a/lectures/affine_risk_prices.md b/lectures/affine_risk_prices.md index e17231b32..9c3964b9f 100644 --- a/lectures/affine_risk_prices.md +++ b/lectures/affine_risk_prices.md @@ -60,7 +60,7 @@ Instead, it assets to let the data reveal risks and their prices. ```{note} -Researchers including {cite}`bansal2004risks` and {cite}`hansen2008consumption` have been less willing +Researchers including {cite}`Bansal_Yaron_2004` and {cite}`hansen2008consumption` have been less willing to give up on consumption-based models of the stochastic discount factor. ``` diff --git a/lectures/merging_of_opinions.md b/lectures/merging_of_opinions.md index 9ac7165ba..9e2958182 100644 --- a/lectures/merging_of_opinions.md +++ b/lectures/merging_of_opinions.md @@ -1150,7 +1150,7 @@ For example, in the Beta–Bernoulli model with a non-atomic prior $\pi$, the mi Global absolute continuity does hold under additional structure, for instance when the parameter space is finite or the model is sufficiently regular to admit a Doob-consistency argument. 
-{cite:t}`diaconis1986` study the consistency of Bayes estimates and show, among other results, that the interplay between local and global absolute continuity plays a central role in ensuring posterior convergence. +{cite:t}`DiaconisFreedman1986` study the consistency of Bayes estimates and show, among other results, that the interplay between local and global absolute continuity plays a central role in ensuring posterior convergence. When $P \perp Q^*$, there are events of probability one under $Q^*$ that have probability zero under $P$, so the agent's beliefs remain fundamentally misspecified. diff --git a/lectures/rational_expectations.md b/lectures/rational_expectations.md index c153ca79b..eaade4f14 100644 --- a/lectures/rational_expectations.md +++ b/lectures/rational_expectations.md @@ -42,7 +42,7 @@ tags: [hide-output] This lecture introduces the concept of a *rational expectations equilibrium*. To illustrate it, we describe a linear quadratic version of a model -due to Lucas and Prescott {cite}`LucasPrescott1971`. +due to Lucas and Prescott {cite}`Lucas_Prescott_1971`. That 1971 paper is one of a small number of research articles that ignited a *rational expectations revolution*. @@ -203,7 +203,7 @@ This type of outcome provides an intellectual justification for liking a competi References for this lecture include -* {cite}`LucasPrescott1971` +* {cite}`Lucas_Prescott_1971` * {cite}`Sargent1987`, chapter XIV * {cite}`Ljungqvist2012`, chapter 7 @@ -439,7 +439,7 @@ Fortunately, another method works here. The method exploits a connection between equilibrium and Pareto optimality expressed in the fundamental theorems of welfare economics (see, e.g, {cite}`MCWG1995`). -Lucas and Prescott {cite}`LucasPrescott1971` used this method to construct a rational expectations equilibrium. +Lucas and Prescott {cite}`Lucas_Prescott_1971` used this method to construct a rational expectations equilibrium. Some details follow. 
diff --git a/lectures/theil_2.md b/lectures/theil_2.md index 3132c3860..b342cda1f 100644 --- a/lectures/theil_2.md +++ b/lectures/theil_2.md @@ -268,7 +268,7 @@ His optimal rule takes the form u_t = \tilde{H}(x_t, z_t, Y_t). ``` -{cite:t}`bacsar2008h` and {cite:t}`hansen2008robustness` establish that at +{cite:t}`bacsar2008h` and {cite:t}`HansenSargent2008` establish that at equilibrium (with "big $K$ = little $k$" imposed) this collapses to ```{math} From 0ede827fcb0d83376e9f5f170857325f284e5224 Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Sat, 4 Apr 2026 21:30:41 +1100 Subject: [PATCH 19/20] updates --- lectures/March_26_DMD_junk.md | 535 ------------------------- lectures/_static/quant-econ.bib | 62 +-- lectures/likelihood_ratio_process.md | 2 +- lectures/likelihood_ratio_process_2.md | 2 +- lectures/merging_of_opinions.md | 2 +- 5 files changed, 17 insertions(+), 586 deletions(-) delete mode 100644 lectures/March_26_DMD_junk.md diff --git a/lectures/March_26_DMD_junk.md b/lectures/March_26_DMD_junk.md deleted file mode 100644 index 7e2dcb890..000000000 --- a/lectures/March_26_DMD_junk.md +++ /dev/null @@ -1,535 +0,0 @@ - -## Old Stuff -- Pre March 26 - -We turn to the case in which $m >>n$ in which an $m \times n$ data matrix $\tilde X$ contains many more random variables $m$ than observations $n$. - -This **tall and skinny** case is associated with **Dynamic Mode Decomposition**. - -You can read about Dynamic Mode Decomposition here {cite}`DMD_book` and here {cite}`DDSE_book` (section 7.2). - -We start with an $m \times n $ matrix of data $\tilde X$ of the form - - -$$ - \tilde X = \begin{bmatrix} X_1 \mid X_2 \mid \cdots \mid X_n\end{bmatrix} -$$ - -where for $t = 1, \ldots, n$, the $m \times 1 $ vector $X_t$ is - -$$ X_t = \begin{bmatrix} X_{1,t} & X_{2,t} & \cdots & X_{m,t} \end{bmatrix}^T $$ - -where $T$ again denotes complex transposition and $X_{i,t}$ is an observation on variable $i$ at time $t$. 
- -From $\tilde X$, form two matrices - -$$ - X = \begin{bmatrix} X_1 \mid X_2 \mid \cdots \mid X_{n-1}\end{bmatrix} -$$ - -and - -$$ -X' = \begin{bmatrix} X_2 \mid X_3 \mid \cdots \mid X_n\end{bmatrix} -$$ - -Here $'$ does not denote matrix transposition but instead is part of the name of the matrix $X'$. - -In forming $ X$ and $X'$, we have in each case dropped a column from $\tilde X$, the last column in the case of $X$, and the first column in the case of $X'$. - -Evidently, $ X$ and $ X'$ are both $m \times \tilde n$ matrices where $\tilde n = n - 1$. - -We denote the rank of $X$ as $p \leq \min(m, \tilde n) = \tilde n$. - -We start with a system consisting of $m$ least squares regressions of **everything** on one lagged value of **everything**: - -$$ - X' = A X + \epsilon -$$ - -where $\epsilon$ is an $m \times m$ matrix of least squares residuals satisfying - -$$ -\epsilon X^+ = 0 -$$ - -and - -$$ -A = X' X^{+} . -$$ (eq:Afullformula) - -Here the (possibly huge) $\tilde n \times m $ matrix $X^{+}$ is the Moore-Penrose generalized inverse of $X$. - -The $i$th the row of $A$ is an $m \times 1$ vector of regression coefficients of $X_{i,t+1}$ on $X_{j,t}, j = 1, \ldots, m$. - - -Consider the (reduced) singular value decomposition - - $$ - X = U \Sigma V^T - $$ (eq:SVDforDMD) - - - -where $U$ is $m \times p$, $\Sigma$ is a $p \times p$ diagonal matrix, and $ V^T$ is a $p \times \tilde n$ matrix. - -Here $p$ is the rank of $X$, where necessarily $p \leq \tilde n$. - -(We described and illustrated a **reduced** singular value decomposition above, and compared it with a **full** singular value decomposition.) - -We could construct the generalized inverse $X^+$ of $X$ by using -a singular value decomposition $X = U \Sigma V^T$ to compute - -$$ -X^{+} = V \Sigma^{-1} U^T -$$ (eq:Xpinverse) - -where the matrix $\Sigma^{-1}$ is constructed by replacing each non-zero element of $ \Sigma$ with $\sigma_j^{-1}$. 
- -We could use formula {eq}`eq:Xpinverse` together with formula {eq}`eq:Afullformula` to compute the matrix $A$ of regression coefficients. - -Instead of doing that, we'll eventually use **dynamic mode decomposition** to compute a rank $r$ approximation to $A$, -where $r < p$. - - -The idea behind **dynamic mode decomposition** is to construct this low rank approximation to $A$ that - - -* constructs an $m \times r$ matrix $\Phi$ that captures effects on all $m$ variables of $r \leq p$ **modes** that are associated with the $r$ largest eigenvalues of $A$ - - -* uses $\Phi$, the current value of $X_t$, and powers of the $r$ largest eigenvalues of $A$ to forecast *future* $X_{t+j}$'s - - - - -## Analysis - -We'll put basic ideas on the table by starting with the special case in which $r = p$ so that we retain -all $p$ singular values of $X$. - -(Later, we'll retain only $r < p$ of them) - -When $r = p$, formula -{eq}`eq:Xpinverse` for $X^+$ implies that - - -$$ -A = X' V \Sigma^{-1} U^T -$$ (eq:Aformbig) - -where $V$ is an $\tilde n \times p$ matrix, $\Sigma^{-1}$ is a $p \times p$ matrix, $U^T$ is a $p \times m$ matrix, -and $U^T U = I_p$ and $V V^T = I_m $. - - -It is convenient to represent $A$ as computed in equation {eq}`eq:Aformbig` as - -$$ -A = U \tilde A U^T -$$ (eq:Afactortilde) - -where the $p \times p$ transition matrix $\tilde A$ can be recovered from - -$$ - \tilde A = U^T A U = U^T X' V \Sigma^{-1} . -$$ (eq:Atilde0) - -We use the $p$ columns of $U$, and thus the $p$ rows of $U^T$, to define a $p \times 1$ vector $\tilde X_t$ as follows - - -$$ -\tilde X_t = U^T X_t . -$$ (eq:tildeXdef2) - -Since $U U^T$ is an $m \times m$ identity matrix, it follows from equation {eq}`eq:tildeXdef2` that we can reconstruct $X_t$ from $\tilde X_t$ by using - -$$ -X_t = U \tilde X_t . 
-$$ (eq:Xdecoder) - - - * Equation {eq}`eq:tildeXdef2` serves as an **encoder** that summarizes the $m \times 1$ vector $X_t$ by a $p \times 1$ vector $\tilde X_t$ - - * Equation {eq}`eq:Xdecoder` serves as a **decoder** that recovers the $m \times 1$ vector $X_t$ from the $p \times 1$ vector $\tilde X_t$ - - - -Because $U^T U = I_p$, we have - -$$ -\tilde X_{t+1} = \tilde A \tilde X_t -$$ (eq:xtildemotion) - -Notice that if we multiply both sides of {eq}`eq:xtildemotion` by $U$ -we get - -$$ -U \tilde X_{t+1} = U \tilde A \tilde X_t = U \tilde A U^T X_t -$$ - -which by virtue of decoder equation {eq}`eq:xtildemotion` recovers - -$$ -X_{t+1} = A X_t . -$$ - - - - - - -It is useful to construct an eigencomposition of the $p \times p$ transition matrix $\tilde A$ defined -in equation in {eq}`eq:Atilde0` above: - -$$ - \tilde A W = W \Lambda -$$ (eq:tildeAeigen) - -where $\Lambda$ is a $r \times r$ diagonal matrix of eigenvalues and the columns of $W$ are corresponding eigenvectors -of $\tilde A$. - -Both $\Lambda$ and $W$ are $p \times p$ matrices. - -Construct the $m \times p$ matrix - -$$ - \Phi = X' V \Sigma^{-1} W -$$ (eq:Phiformula) - - - -Tu et al. {cite}`tu_Rowley` established the following - -**Proposition** The $r$ columns of $\Phi$ are eigenvectors of $A$ that correspond to the largest $r$ eigenvalues of $A$. - -**Proof:** From formula {eq}`eq:Phiformula` we have - -$$ -\begin{aligned} - A \Phi & = (X' V \Sigma^{-1} U^T) (X' V \Sigma^{-1} W) \cr - & = X' V \Sigma^{-1} \tilde A W \cr - & = X' V \Sigma^{-1} W \Lambda \cr - & = \Phi \Lambda - \end{aligned} -$$ - -Thus, we have deduced that - -$$ -A \Phi = \Phi \Lambda -$$ (eq:APhiLambda) - -Let $\phi_i$ be the the $i$the column of $\Phi$ and $\lambda_i$ be the corresponding $i$ eigenvalue of $\tilde A$ from decomposition {eq}`eq:tildeAeigen`. - -Writing out the $m \times p$ vectors on both sides of equation {eq}`eq:APhiLambda` and equating them gives - - -$$ -A \phi_i = \lambda_i \phi_i . 
-$$ - -Thus, $\phi_i$ is an eigenvector of $A$ that corresponds to eigenvalue $\lambda_i$ of $A$. - -This concludes the proof. - - -Also see {cite}`DDSE_book` (p. 238) - - -### Two Representations of $A$ - -We have constructed two representations of (or approximations to) $A$. - -One from equation {eq}`eq:Afactortilde` is - -$$ -A = U \tilde A U^T -$$ (eq:Aform11) - -while from equation the eigen decomposition {eq}`eq:APhiLambda` the other is - -$$ -A = \Phi \Lambda \Phi^+ -$$ (eq:Aform12) - - -From formula {eq}`eq:Aform11` we can deduce - -$$ -\tilde X_{t+1} = \tilde A \tilde X_t -$$ - -where - -$$ -\begin{aligned} -\tilde X_t & = U^T X_t \cr -X_t & = U \tilde X_t -\end{aligned} -$$ - - -From formula {eq}`eq:Aform12` we can deduce - -$$ -b_{t+1} = \Lambda b_t -$$ - -where - -$$ -\begin{aligned} -b_t & = \Phi^+ X_t \cr -X_t & = \Phi b_t -\end{aligned} -$$ - - -There is better formula for the $p \times 1$ vector $b_t$ - -In particular, the following argument from {cite}`DDSE_book` (page 240) provides a computationally efficient way -to compute $b_t$. - -For convenience, we'll do this first for time $t=1$. - - - -For $t=1$, we have - -$$ - X_1 = \Phi b_1 -$$ (eq:X1proj) - -where $b_1$ is a $p \times 1$ vector. 
- -Since $X_1 = U \tilde X_1$, it follows that - -$$ - U \tilde X_1 = X' V \Sigma^{-1} W b_1 -$$ - -and - -$$ - \tilde X_1 = U^T X' V \Sigma^{-1} W b_1 -$$ - -Recall that $ \tilde A = U^T X' V \Sigma^{-1}$ so that - -$$ - \tilde X_1 = \tilde A W b_1 -$$ - -and therefore, by the eigendecomposition {eq}`eq:tildeAeigen` of $\tilde A$, we have - -$$ - \tilde X_1 = W \Lambda b_1 -$$ - -Therefore, - -$$ - b_1 = ( W \Lambda)^{-1} \tilde X_1 -$$ - -or - - -$$ - b_1 = ( W \Lambda)^{-1} U^T X_1 -$$ (eq:beqnsmall) - - - -which is computationally more efficient than the following instance of our earlier equation for computing the initial vector $b_1$: - -$$ - b_1= \Phi^{+} X_1 -$$ (eq:bphieqn) - - -Conditional on $X_t$, we can construct forecasts $\check X_{t+j} $ of $X_{t+j}, j = 1, 2, \ldots, $ from -either - -$$ -\check X_{t+j} = \Phi \Lambda^j \Phi^{+} X_t -$$ (eq:checkXevoln) - - -or the following equation - -$$ - \check X_{t+j} = \Phi \Lambda^j (W \Lambda)^{-1} U^T X_t -$$ (eq:checkXevoln2) - - - -### Using Fewer Modes - -The preceding formulas assume that we have retained all $p$ modes associated with the positive -singular values of $X$. - -We can easily adapt all of the formulas to describe a situation in which we instead retain only -the $r < p$ largest singular values. - -In that case, we simply replace $\Sigma$ with the appropriate $r \times r$ matrix of singular values, -$U$ with the $m \times r$ matrix of whose columns correspond to the $r$ largest singular values, -and $V$ with the $\tilde n \times r$ matrix whose columns correspond to the $r$ largest singular values. - -Counterparts of all of the salient formulas above then apply. 
- - - - - -## Reduced-order VAR - -DMD is a natural tool for estimating a **reduced order vector autoregression**, -an object that we define in terms of the population regression equation - -$$ -X_{t+1} = \check A X_t + C \epsilon_{t+1} -$$ (eq:VARred) - -where - -* $X_t$ is an $m \times 1$ vector -* $\check A$ is an $m \times m$ matrix of rank $r$ whose eigenvalues are all less than $1$ in modulus -* $\epsilon_{t+1} \sim {\mathcal N}(0, I)$ is an $m \times 1$ vector of i.i.d. shocks -* $E \epsilon_{t+1} X_t^T = 0$, so that all shocks are orthogonal to all regressors - -To link this model to a dynamic mode decomposition (DMD), again take - -$$ -X = [ X_1 \mid X_2 \mid \cdots \mid X_{n-1} ] -$$ - -$$ -X' = [ X_2 \mid X_3 \mid \cdots \mid X_n ] -$$ - -so that according to model {eq}`eq:VARred` - - -$$ -X' = \begin{bmatrix} \check A X_1 + C \epsilon_2 \mid \check A X_2 + C \epsilon_3 \mid \cdots \mid \check A X_{n-1} + C -\epsilon_n \end{bmatrix} -$$ - -To illustrate some useful calculations, assume that $n =3 $ and form - -$$ -X' X^T = \begin{bmatrix} \check A X_1 + C \epsilon_2 & \check A X_2 + C \epsilon_3 \end{bmatrix} - \begin{bmatrix} X_1^T \cr X_2^T \end{bmatrix} -$$ - -or - -$$ -X' X^T = \check A ( X_1 X_1^T + X_2 X_2^T) + C( \epsilon_2 X_1^T + \epsilon_3 X_2^T) -$$ - -but because - -$$ -E ( \epsilon_2 X_1^T + \epsilon_3 X_2^T) = 0 -$$ - -we have - -$$ -X' X^T = \check A ( X_1 X_1^T + X_2 X_2^T) -$$ - -Evidently, - -$$ -X X^T = ( X_1 X_1^T + X_2 X_2^T) -$$ - -so that our matrix $\check A$ of least squares regression coefficients is - -$$ -\check A = (X' X^T) (X X^T)^+ -$$ - -Our **assumption** that $\check A$ is a matrix of rank $r$ leads us to represent it as - -$$ -\check A = \Phi \Lambda \Phi^{+} -$$ - -where $\Phi$ and $\Lambda$ are computed with the DMD algorithm described above. 
- -Associated with the VAR representation {eq}`eq:VARred` -is the usual moving average representation - -$$ -X_{t+j} = \check A^j X_t + C \epsilon_{t+j} + \check A C \epsilon_{t+j-1} + \cdots \check A^{j-1} \epsilon_{t+1} -$$ - -After computing $\check A$, we can construct sample versions -of - -$$ -C \epsilon_{t+1} = X_{t+1} - \check A X_t , \quad t =1, \ldots, n-1 -$$ - -and check whether they are serially uncorrelated as assumed in {eq}`eq:VARred`. - -For example, we can compute spectra and cross-spectra of components of $C \epsilon_{t+1}$ -and check for serial-uncorrelatedness in the usual ways. - -We can also estimate the covariance matrix of $C \epsilon_{t+1}$ -from - -$$ -\frac{1}{n-1} \sum_{t=1}^{n-1} (C \epsilon_{t+1} )( C \epsilon_{t+1})^T -$$ - -It can be enlightening to diagonize our reduced order VAR {eq}`eq:VARred` by noting that it can -be written - - -$$ -X_{t+1} = \Phi \Lambda \Phi^{+} X_t + C \epsilon_{t+1} -$$ - - -and then writing it as - -$$ -\Phi^+ X_{t+1} = \Lambda \Phi^{+} X_t + \Phi^+ C \epsilon_{t+1} -$$ - -or - -$$ -\bar X_{t+1} = \Lambda \bar X_t + \bar \epsilon_{t+1} -$$ (eq:VARmodes) - -where $\bar X_t $ is an $r \times 1$ **mode** and $\bar \epsilon_{t+1}$ is an $r \times 1$ -shock. - -The $r$ modes $\bar X_t$ obey the first-order VAR {eq}`eq:VARmodes` in which $\Lambda$ is an $r \times r$ diagonal matrix. - -Note that while $\Lambda$ is diagonal, the contemporaneous covariance matrix of $\bar \epsilon_{t+1}$ need not be. - - -**Remark:** It is permissible for $X_t$ to contain lagged values of observables. 
- - For example, we might have a setting in which - -$$ -X_t = \begin{bmatrix} -y_{1t} \cr -y_{1,t-1} \cr -\vdots \cr -y_{1, t-k}\cr -y_{2,t} \cr -y_{2, t-1} \cr -\vdots -\end{bmatrix} -$$ - -+++ diff --git a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index 967c3cc47..82f5cc7ec 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -195,13 +195,13 @@ @article{Negishi1960 year = {1960} } -@article{Sandroni2000, - author = {Sandroni, Alvaro}, +@article{MillerSanchirico1999, + author = {Miller, Ronald I. and Sanchirico, Chris William}, title = {The Role of Absolute Continuity in ``{Merging} of {Opinions}'' and ``{Rational} {Learning}''}, journal = {Games and Economic Behavior}, - year = {2000}, + year = {1999}, volume = {29}, number = {1--2}, pages = {170--190}, @@ -218,7 +218,7 @@ @article{JacksonKalaiSmorodinsky1999 volume = {67}, number = {4}, pages = {875--893}, - doi = {10.1111/1468-0262.00053} + doi = {10.1111/1468-0262.00055} } @article{KalaiLehrer1993Nash, @@ -240,7 +240,7 @@ @article{KalaiLehrer1993Subjective volume = {61}, number = {5}, pages = {1231--1240}, - doi = {10.2307/2951506} + doi = {10.2307/2951500} } @article{KalaiLehrer1994Merging, @@ -262,7 +262,7 @@ @article{KalaiLehrerSmorodinsky1999 volume = {29}, number = {1--2}, pages = {151--169}, - doi = {10.1006/game.1999.0718} + doi = {10.1006/game.1998.0608} } @article{Sandroni1998Nash, @@ -275,14 +275,14 @@ @article{Sandroni1998Nash volume = {22}, number = {1}, pages = {121--147}, - doi = {10.1006/game.1997.0580} + doi = {10.1006/game.1997.0572} } @article{PomattoAlNajjarSandroni2014, author = {Pomatto, Luciano and Al-Najjar, Nabil I. and Sandroni, Alvaro}, title = {Merging and Testing Opinions}, - journal = {Annals of Statistics}, + journal = {The Annals of Statistics}, year = {2014}, volume = {42}, number = {3}, @@ -308,12 +308,13 @@ @incollection{LehrerSmorodinsky1996Learning Papers in Honor of {David Blackwell}}, editor = {Ferguson, Thomas S. 
and Shapley, Lloyd S. and MacQueen, James B.}, - series = {{IMS} Lecture Notes---Monograph Series}, + series = {{IMS} Lecture Notes -- Monograph Series}, volume = {30}, pages = {147--168}, publisher = {Institute of Mathematical Statistics}, address = {Hayward, CA}, - year = {1996} + year = {1996}, + doi = {10.1214/lnms/1215453571} } @article{Nyarko1994, @@ -336,7 +337,7 @@ @article{JacksonKalai1999 volume = {88}, number = {1}, pages = {40--59}, - doi = {10.1006/jeth.1999.2542} + doi = {10.1006/jeth.1999.2538} } @article{AcemogluChernozhukovYildiz2016, @@ -349,13 +350,13 @@ @article{AcemogluChernozhukovYildiz2016 volume = {11}, number = {1}, pages = {187--225}, - doi = {10.3982/TE1156} + doi = {10.3982/TE436} } @article{DiaconisFreedman1986, author = {Diaconis, Persi and Freedman, David}, title = {On the Consistency of {Bayes} Estimates}, - journal = {Annals of Statistics}, + journal = {The Annals of Statistics}, year = {1986}, volume = {14}, number = {1}, @@ -751,16 +752,6 @@ @article{alchian1950uncertainty publisher={The University of Chicago Press} } -@article{blume2006if, - title={If you're so smart, why aren't you rich? Belief selection in complete and incomplete markets}, - author={Blume, Lawrence and Easley, David}, - journal={Econometrica}, - volume={74}, - number={4}, - pages={929--966}, - year={2006}, - publisher={Wiley Online Library} -} @article{mendoza1998international, title={The international ramifications of tax reforms: supply-side economics in a global economy}, @@ -1657,13 +1648,6 @@ @article{rosen1994cattle publisher = {The University of Chicago Press} } -@book{HS2013, - title = {Recursive Linear Models of Dynamic Economics}, - author = {Hansen, Lars Peter and Thomas J. 
Sargent}, - year = {2013}, - publisher = {Princeton University Press}, - address = {Princeton, New Jersey} -} @article{Reffett1996, title = {Production-based asset pricing in monetary economies with transactions costs}, @@ -3193,14 +3177,6 @@ @techreport{giannoni2010optimal institution = {National Bureau of Economic Research} } -@article{miller1985dynamic, - title = {Dynamic games and the time inconsistency of optimal policy in open economies}, - author = {Miller, Marcus and Salmon, Mark}, - journal = {The Economic Journal}, - pages = {124--137}, - year = {1985}, - publisher = {JSTOR} -} @article{pearlman1986rational, title = {Rational expectations models with partial information}, @@ -3312,16 +3288,6 @@ @article{kikuchi2018span publisher = {Wiley Online Library} } -@article{coase1937nature, - title = {The nature of the firm}, - author = {Coase, Ronald Harry}, - journal = {economica}, - volume = {4}, - number = {16}, - pages = {386--405}, - year = {1937}, - publisher = {Wiley Online Library} -} @article{do1999solutions, title = {Solutions for the linear-quadratic control problem of Markov jump linear systems}, diff --git a/lectures/likelihood_ratio_process.md b/lectures/likelihood_ratio_process.md index 687f16f14..0bdb94d02 100644 --- a/lectures/likelihood_ratio_process.md +++ b/lectures/likelihood_ratio_process.md @@ -1725,7 +1725,7 @@ markov_results = analyze_markov_chains(P_f, P_g) Likelihood processes play an important role in Bayesian learning, as described in {doc}`likelihood_bayes` and as applied in {doc}`odu`. -Likelihood ratio processes are central to Lawrence Blume and David Easley's answer to their question "If you're so smart, why aren't you rich?" {cite}`blume2006if`, the subject of the lecture{doc}`likelihood_ratio_process_2`. +Likelihood ratio processes are central to Lawrence Blume and David Easley's answer to their question "If you're so smart, why aren't you rich?" 
{cite}`Blume_Easley2006`, the subject of the lecture {doc}`likelihood_ratio_process_2`. Likelihood ratio processes also appear in {doc}`advanced:additive_functionals`, which contains another illustration of the **peculiar property** of likelihood ratio processes described above. diff --git a/lectures/likelihood_ratio_process_2.md index cf37fb979..29b966e87 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -29,7 +29,7 @@ kernelspec: ## Overview A likelihood ratio process lies behind Lawrence Blume and David Easley's answer to their question -"If you're so smart, why aren't you rich?" {cite}`blume2006if`. +"If you're so smart, why aren't you rich?" {cite}`Blume_Easley2006`. Blume and Easley constructed formal models to study how differences of opinions about probabilities governing risky income processes would influence outcomes and be reflected in prices of stocks, bonds, and insurance policies that individuals use to share and hedge risks. diff --git a/lectures/merging_of_opinions.md index 9e2958182..a8812675e 100644 --- a/lectures/merging_of_opinions.md +++ b/lectures/merging_of_opinions.md @@ -1295,7 +1295,7 @@ Some influential applications and extensions are: - {cite}`JacksonKalaiSmorodinsky1999`: de Finetti-style representations are connected to Bayesian learning and posterior convergence. - {cite}`JacksonKalai1999`: social learning erodes reputational effects that rely on persistent disagreement across cohorts. - {cite}`Sandroni1998Nash`: near-absolute-continuity conditions are shown to suffice for Nash-type convergence in repeated games. -- {cite}`Sandroni2000`: gives an alternative proof and an economic interpretation of persistent disagreement in terms of mutually favorable bets. +- {cite}`MillerSanchirico1999`: gives an alternative proof and an economic interpretation of persistent disagreement in terms of mutually favorable bets.
- {cite}`LehrerSmorodinsky1996Compatible`: studies broader compatibility notions beyond Blackwell--Dubins absolute continuity. - {cite}`LehrerSmorodinsky1996Learning`: surveys merging and learning in repeated strategic environments. - {cite}`Nyarko1994`: relates Bayesian learning under absolute continuity to convergence toward correlated equilibrium. From 5d92aaad8bd1f558625c49a711b8fb6101235387 Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Sat, 4 Apr 2026 21:42:45 +1100 Subject: [PATCH 20/20] updates --- lectures/survival_recursive_preferences.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lectures/survival_recursive_preferences.md b/lectures/survival_recursive_preferences.md index 0548d3323..6d1053307 100644 --- a/lectures/survival_recursive_preferences.md +++ b/lectures/survival_recursive_preferences.md @@ -82,7 +82,7 @@ Under the true probability measure $P$, aggregate endowment satisfies $$ d \log Y_t = \mu_Y dt + \sigma_Y dW_t, \quad Y_0 > 0 -$$ (eq:endowment) +$$ (eq:srp_endowment) where $W$ is a standard Brownian motion, $\mu_Y$ is the drift, and $\sigma_Y > 0$ is the volatility.