path: root/ictp-saifr_colloquium.tex
author     Jaron Kent-Dobias <jaron@kent-dobias.com>  2025-02-11 10:50:02 -0300
committer  Jaron Kent-Dobias <jaron@kent-dobias.com>  2025-02-11 10:50:02 -0300
commit     4098321621e5b30b55ee4d406cf3c8bb68318914 (patch)
tree       fdfe8b72783512f301328a358180e6be5f2a7017 /ictp-saifr_colloquium.tex
parent     d01b7451f7d47429f60c0a7715592716acf18d9b (diff)
Further standardize notation.
Diffstat (limited to 'ictp-saifr_colloquium.tex')
-rw-r--r--   ictp-saifr_colloquium.tex   21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/ictp-saifr_colloquium.tex b/ictp-saifr_colloquium.tex
index 8611f15..2d621f2 100644
--- a/ictp-saifr_colloquium.tex
+++ b/ictp-saifr_colloquium.tex
@@ -85,19 +85,20 @@
\begin{columns}
\begin{column}{0.4\textwidth}
- Pick a basis of $N$ functions $b_1(x), \ldots, b_N(x)$
+ Pick a basis of $N$ functions
+ \[b_1(x), \ldots, b_N(x)\]
- \bigskip
+ \smallskip
Approximate the ground truth
\[
\hat f(x\mid a_1,\ldots, a_N)=\sum_{j=1}^Na_jb_j(x)
\]
- Find $a_1, \ldots, a_N$ minimizing
+ Find $\pmb a=[a_1, \ldots, a_N]$ minimizing
\[
- \chi^2
- =\sum_{i=1}^M\left(y_i-\sum_{j=1}^Na_jb_j(x_i)\right)^2
+ \chi^2(\pmb a\mid\pmb x,\pmb y)
+ =\sum_{i=1}^M\left(y_i-\hat f(x_i\mid\pmb a)\right)^2
\]
\end{column}
\begin{column}{0.6\textwidth}
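
As a quick numerical illustration of the least-squares setup in the hunk above, a minimal NumPy sketch; the polynomial basis b_j(x) = x**(j-1), the sample data, and all names are assumptions for illustration, not taken from the slides.

    import numpy as np

    def basis_matrix(x, N):
        # Columns are b_1(x), ..., b_N(x); illustrative polynomial basis b_j(x) = x**(j-1)
        return np.vander(np.atleast_1d(x), N, increasing=True)

    def fit(x, y, N):
        # Find a = [a_1, ..., a_N] minimizing chi^2(a | x, y) = sum_i (y_i - f_hat(x_i | a))^2
        a, *_ = np.linalg.lstsq(basis_matrix(x, N), y, rcond=None)
        return a

    def f_hat(x, a):
        # f_hat(x | a) = sum_j a_j b_j(x)
        return basis_matrix(x, len(a)) @ a

    x = np.linspace(0, 1, 20)      # M = 20 sample points (illustrative)
    y = np.sin(2 * np.pi * x)      # stand-in for the measured y_i
    a = fit(x, y, N=5)
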
@@ -193,7 +194,7 @@
\begin{column}{0.5\textwidth}
Knowing the ground truth, fit error is
\[
- \text{MSE}=\int dx\left(f(x)-\sum_{j=1}^Na_jb_j(x)\right)^2
+ \text{MSE}=\int dx\left(f(x)-\hat f(x\mid\pmb a)\right)^2
\]
\smallskip
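
The MSE integral in this hunk can be estimated on a grid; the interval [0, 1], the trapezoidal rule, and the callables f and f_hat below are assumptions for illustration, reusing the f_hat sketch above.

    import numpy as np

    def mse(f, f_hat, a, x_min=0.0, x_max=1.0, n_grid=1001):
        # MSE = \int dx (f(x) - f_hat(x | a))^2, approximated with the trapezoidal rule
        x = np.linspace(x_min, x_max, n_grid)
        return np.trapz((f(x) - f_hat(x, a)) ** 2, x)
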
@@ -237,12 +238,16 @@
Fit function is a neural network:
\[
- \hat f(\mathbf x\mid B_1,\ldots B_L)=\sigma\left(B_L \sigma\left( B_{L-1}\cdots\sigma\left(B_2\sigma (B_1\mathbf x)\right)\cdots\right)\right)
+ \hat f(\pmb x\mid B_1,\ldots B_L)=\sigma\left(B_L \sigma\left( B_{L-1}\cdots\sigma\left(B_2\sigma (B_1\pmb x)\right)\cdots\right)\right)
\]
\medskip
- $\chi^2$ is called \emph{training error}
+ $\chi^2(\pmb a\mid\text{data})$ is called \emph{cost} or \emph{objective function}
+
+ \medskip
+
+ $\chi^2(\pmb a^*\mid\text{data})$ is called the \emph{training error}
\medskip
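
The layered fit function f_hat(x | B_1, ..., B_L) in this hunk is a plain forward pass; the tanh nonlinearity and the random weight matrices below are illustrative assumptions, not choices made in the slides.

    import numpy as np

    def sigma(z):
        # Elementwise nonlinearity; tanh is an illustrative choice
        return np.tanh(z)

    def f_hat(x, Bs):
        # sigma(B_L sigma(B_{L-1} ... sigma(B_1 x) ...))
        h = np.asarray(x)
        for B in Bs:
            h = sigma(B @ h)
        return h

    rng = np.random.default_rng(0)
    Bs = [rng.standard_normal((4, 2)), rng.standard_normal((1, 4))]  # L = 2 layers
    print(f_hat(np.array([0.3, -1.2]), Bs))
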