%% mlmath.tex
%% Copyright 2020 by Zheng Ma, Zhiqin Xu, Tao Luo and Yaoyu Zhang
%
% This work may be distributed and/or modified under the
% conditions of the LaTeX Project Public License, either version 1.3c
% of this license or (at your option) any later version.
% The latest version of this license is in
%   http://www.latex-project.org/lppl.txt
% and version 1.3c or later is part of all distributions of LaTeX
% version 2005/12/01 or later.
%
% This work has the LPPL maintenance status `maintained'.
% 
% The Current Maintainer of this work is Zheng Ma.
%
% This work consists of the main source files mlmath.sty, mlmath.tex, mlmath.md,
% and the derived documentation mlmath.pdf, README.md as well as the resource
% files (they are SVG files) of README.md under the folder svgs/.

%! TeX program = xelatex
\documentclass{article}
\usepackage{geometry}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{graphicx}
\usepackage{hyperref}
\usepackage{xcolor}
\usepackage{booktabs}
\usepackage[scheme=plain]{ctex}

\usepackage{mlmath}
%%%%%%%%%%%%%%%%%%%%%%%
% \NewDocumentCommand{\tens}{t_}
%  {%
%   \IfBooleanTF{#1}
%   {\tensop}
%   {\otimes}%
%  }
% \NewDocumentCommand{\tensop}{m}
%  {%
%   \mathbin{\mathop{\otimes}\displaylimits_{#1}}%
%  }

%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{document}

\title{Suggested Notation for Machine Learning}
\author{Beijing Academy of Artificial Intelligence\thanks{This document is published by Beijing Academy of Artificial Intelligence  (北京智源人工智能研究院)  jointly with Peking University (北京大学) and Shanghai Jiao Tong University (上海交通大学). The first version is drafted by Zhi-Qin John Xu (Corresponding: xuzhiqin@sjtu.edu.cn, Shanghai Jiao Tong University), Tao Luo (Purdue University), Zheng Ma (Purdue University), Yaoyu Zhang (Institute for Advanced Study).}}

%\author{Zhi-Qin John Xu\thanks{Corresponding: xuzhiqin@sjtu.edu.cn, Shanghai Jiao Tong University.}, Tao Luo\thanks{Purdue University}, Zheng Ma\thanks{Purdue University}, Yaoyu Zhang\thanks{Institute for Advanced Study}}

\maketitle
%\date{\today}
\begin{abstract}
    The field of machine learning has been evolving rapidly in recent years, and communication between different researchers and research groups has become increasingly important. A key challenge for communication arises from inconsistent notation across papers. This proposal suggests a standard for commonly used mathematical notation in machine learning. In this first version, only some notation is covered; more will be added later. The proposal will be updated regularly as the field progresses. We look forward to suggestions for improving it in future versions.
\end{abstract}

\tableofcontents

\section{Dataset}~\\
Dataset  $S=\{\vz_i\}_{i=1}^n=\{(\vx_i,\vy_i)\}_{i=1}^n$ is sampled from a distribution $\fD$ over a domain $\fZ=\fX\times\fY$.

$\fX$  is the instance domain (a set), $\fY$ is the label domain (a set), and $\fZ=\fX\times\fY$ is the example domain (a set).

Usually,
$\fX$ is a subset of $\sR^d$ and $\fY$ is a subset of $\sR^{d_{o}}$, where $d$ is the input dimension and $d_{o}$ is the output dimension.

$n=\#S$ is the number of samples. Unless otherwise specified, $S$ and $n$ refer to the training set.
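
For instance, a minimal sketch of the source for the definition above, using the simplified commands provided by the \texttt{mlmath} package (loaded in this document with \verb!\usepackage{mlmath}!):
\begin{verbatim}
$S=\{\vz_i\}_{i=1}^n=\{(\vx_i,\vy_i)\}_{i=1}^n$ is sampled from
a distribution $\fD$ over a domain $\fZ=\fX\times\fY$.
\end{verbatim}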

\section{Function}~\\
A hypothesis space is denoted by $\fH$. A hypothesis function is denoted by $f_{\vtheta}(\vx)\in\fH$ or $f(\vx;\vtheta)\in\fH$ with $f_{\vtheta}:\fX\to\fY$.

$\vtheta$  denotes the set of parameters of  $f_{\vtheta}$.

If there exists a target function, it is denoted by $f^*$ or $f:\fX\to\fY$ satisfying $\vy_i=f^*(\vx_i)$ for $i=1,\ldots,n$.

\section{Loss function}~\\
A loss function, denoted by $\ell:\fH\times\fZ\to\sR_+:=[0,+\infty)$, measures the difference between a predicted label and a true label, e.g., the $L^2$ loss:
\[
    \ell(f_{\vtheta},\vz)=\frac{1}{2}(f_{\vtheta}(\vx)-\vy)^2,
\]
where $\vz=(\vx,\vy)$. $\ell(f_{\vtheta},\vz)$ can also be written as
\[
    \ell(f_{\vtheta}(\vx),\vy)
\]
for convenience.
The empirical risk or training loss for a set $S=\{(\vx_i,\vy_i)\}_{i=1}^{n}$ is denoted by $\LS(\vtheta)$, $L_{n}(\vtheta)$, $R_{n}(\vtheta)$, or $R_{S}(\vtheta)$,
\begin{equation}
    \LS(\vtheta) =\frac{1}{n}\sum_{i=1}^n\ell(f_{\vtheta}(\vx_i),\vy_i).
\end{equation}


The population risk or expected loss is denoted by $L_{\fD}(\vtheta)$ or $R_{\fD}(\vtheta)$,
\begin{equation}
    \LD(\vtheta) =\Exp_{\fD}\ell(f_{\vtheta}(\vx),\vy),
\end{equation}
where $\vz=(\vx,\vy)$ follows the distribution $\fD$.
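
For example, combining the $L^2$ loss above with the definition of the empirical risk gives
\[
    \LS(\vtheta)=\frac{1}{2n}\sum_{i=1}^{n}\left(f_{\vtheta}(\vx_i)-\vy_i\right)^2.
\]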

\section{Activation function}
An activation function is denoted by $\sigma(x)$.
\begin{exam}Some commonly used activation functions are~\\
    \begin{enumerate}
        \item $\sigma(x) = \ReLU (x) = \max (0, x)$;
        \item $\sigma(x) ={\rm sigmoid}(x)= \frac{1}{1 + \E^{-x}}$;
        \item $\sigma(x) = \tanh x$;
        \item $\sigma(x) = \cos x, \sin x$.
    \end{enumerate}
\end{exam}


\section{Two-layer neural network}~\\
The number of neurons in the hidden layer is denoted by $m$. A two-layer neural network is
\begin{equation}
    f_{\vtheta}(\vx)=\sum_{j=1}^{m} a_j \sigma (\vw_j\cdot \vx + b_j),
\end{equation}
where $\sigma$ is the activation function, $\vw_j$ is the input weight, $a_j$ is the output weight, and $b_j$ is the bias term. We denote the set of parameters by
\[
    \vtheta=(a_1,\cdots,a_m,\vw_1,\cdots,\vw_m,b_1,\cdots,b_m).
\]
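
As a concrete instance (for illustration only), take $d=1$, $m=2$, and $\sigma=\ReLU$; then
\[
    f_{\vtheta}(x)=a_1\ReLU(w_1 x+b_1)+a_2\ReLU(w_2 x+b_2)
\]
is a piecewise linear function of $x$ with parameters $\vtheta=(a_1,a_2,w_1,w_2,b_1,b_2)$.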
\section{General deep neural network}~\\
When counting layers, the input layer is excluded. An $L$-layer neural network is denoted by
\begin{equation}
    f_{\vtheta}(\vx) = \mW^{[L-1]} \sigma\circ(\mW^{[L-2]}\sigma\circ(\cdots (\mW^{[1]} \sigma\circ(\mW^{[0]} \vx + \vb^{[0]} ) + \vb^{[1]} )\cdots)+\vb^{[L-2]})+\vb^{[L-1]},
\end{equation}
where $\mW^{[l]} \in \sR^{m_{l+1}\times m_{l}}$, $\vb^{[l]}\in\sR^{m_{l+1}}$, $m_0=d_{\rm in}=d$, $m_{L}=d_{\rm o}$,
$\sigma$ is a scalar function and ``$\circ$'' means entry-wise operation.
We denote the set of parameters by \[
    \vtheta=(\mW^{[0]},\mW^{[1]},\ldots,\mW^{[L-1]},\vb^{[0]},\vb^{[1]},\ldots,\vb^{[L-1]}),
\]
and an entry of $\mW^{[l]}$ by $\mW^{[l]}_{ij}$. The network can also be defined recursively:
\begin{align}
     & f_{\vtheta}^{[0]}(\vx)=\vx,                                                                                   \\
     & f_{\vtheta}^{[l]}(\vx)=\sigma\circ(\mW^{[l-1]} f_{\vtheta}^{[l-1]}(\vx) + \vb^{[l-1]}) \quad 1\leq l\leq L-1, \\
     & f_{\vtheta}(\vx)=f_{\vtheta}^{[L]}(\vx)=\mW^{[L-1]} f_{\vtheta}^{[L-1]}(\vx) + \vb^{[L-1]}.
\end{align}
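
As a consistency check, setting $L=2$ in the recursion gives
\[
    f_{\vtheta}(\vx)=\mW^{[1]}\sigma\circ(\mW^{[0]}\vx+\vb^{[0]})+\vb^{[1]},
\]
which, for $d_{\rm o}=1$, reduces to the two-layer network of the previous section (up to the additional output bias $\vb^{[1]}$) with $a_j=\mW^{[1]}_{1j}$, $\vw_j$ the $j$th row of $\mW^{[0]}$, and $b_j=\vb^{[0]}_j$.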

\section{Complexity}~\\
The VC-dimension of a hypothesis class $\fH$ is denoted by ${\rm VCdim}(\fH)$.

The Rademacher complexity of a hypothesis space $\fH$ on a sample set $S$ is denoted by ${\rm Rad} (\fH\circ S)$ or ${\rm Rad}_{S} (\fH)$. The complexity ${\rm Rad}_{S} (\fH)$ is random because of the randomness of $S$. The expectation of the empirical Rademacher complexity over all samples of size $n$ is denoted by
\[
    {\rm Rad}_{n} (\fH)=\Exp_{S} {\rm Rad}_{S} (\fH).
\]
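
For reference, one common definition of the empirical Rademacher complexity (this proposal fixes only the notation, not the definition) is
\[
    {\rm Rad}_{S} (\fH)=\Exp_{\epsilon}\sup_{f\in\fH}\frac{1}{n}\sum_{i=1}^{n}\epsilon_i f(\vx_i),
\]
where $\epsilon_1,\ldots,\epsilon_n$ are i.i.d. random variables taking the values $\pm 1$ with probability $1/2$ each.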

\section{Training}~\\
Gradient descent is often abbreviated as GD, and stochastic gradient descent as SGD.

A batch set is denoted by $B$ and the batch size is denoted by $|B|$.

The learning rate is denoted by $\eta$.
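
As an illustration (not part of the notation standard itself), one SGD step with learning rate $\eta$ can be written as
\[
    \vtheta_{t+1}=\vtheta_{t}-\eta\nabla_{\vtheta} L_{B_t}(\vtheta_t),\qquad
    L_{B_t}(\vtheta)=\frac{1}{|B_t|}\sum_{(\vx_i,\vy_i)\in B_t}\ell(f_{\vtheta}(\vx_i),\vy_i),
\]
where $B_t\subset S$ denotes the batch used at step $t$.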

%\section{Gram matrix}~\\
%The Gram matrix is denoted by $K_n$.

\section{Fourier frequency}~\\
The discretized frequency is denoted by $\vk$, and the continuous frequency is denoted by $\vxi$.

\section{Convolution}~\\
The convolution operation is denoted by $*$.
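
A common definition for functions $f,g:\sR^d\to\sR$ (the proposal fixes only the symbol) is
\[
    (f*g)(\vx)=\int_{\sR^d} f(\vx')\,g(\vx-\vx')\,\mathrm{d}\vx'.
\]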

\newpage
\section{Notation table}~\\
\begin{center}
    \begin{tabular}{llll}
        \toprule
        symbol                                              & meaning                                 & \LaTeX                  & simplified              \\
        \midrule

        $\vx$                                               & input                                   & \verb!\bm{x}!  & \verb!\vx!  \\
        $\vy$                                               & output, label                           & \verb!\bm{y}!  & \verb!\vy!  \\
        $d$                                                 & input dimension                         & \verb!d!  &                         \\
        $d_{\rm o}$                                         & output dimension                        & \verb!d_{\rm o}!  &                         \\
        $n$                                                 & number of samples                       & \verb!n!                            \\
        $\fX$
                                                            & instances domain (a set)                & \verb!\mathcal{X}!  & \verb!\fX!  \\
        $\fY$
                                                            & labels domain (a set)                   & \verb!\mathcal{Y}! & \verb!\fY! \\
        $\fZ$                                               & $=\fX\times\fY$ example domain          & \verb!\mathcal{Z}! & \verb!\fZ! \\
        $\fH$                                               & hypothesis space (a set)                & \verb!\mathcal{H}! & \verb!\fH! \\
        $\vtheta$                                           & a set of parameters                     & \verb!\bm{\theta}! & \verb!\vtheta! \\
        $f_{\vtheta}:\fX\to \fY$                            & hypothesis function                     & \verb!f_{\bm{\theta}}! & \verb!f_{\vtheta}! \\
        $f$ or $f^*:\fX\to\fY$                              & target function                         & \verb!f,f^*!                           \\
        $\ell:\fH\times \fZ\to \sR_+$                       & loss function                           & \verb!\ell!                           \\
        $\fD$                                               & distribution of  $\fZ$                  & \verb!\mathcal{D}! & \verb!\fD! \\
        $S=\{\vz_i\}_{i=1}^n$                               & $=\{(\vx_i,\vy_i)\}_{i=1}^n$ sample set                                                     \\
        \begin{tabular}{@{}l @{}}$\LS(\vtheta)$, $L_{n}(\vtheta)$,\\$R_{n}(\vtheta)$, $R_{S}(\vtheta)$\end{tabular}                           & empirical risk or training loss                                                             \\
        $\LD(\vtheta)$, $R_{\fD}(\vtheta)$                  & population risk or expected loss                                                            \\
        $\sigma:\sR\to\sR$                                  & activation function                     & \verb!\sigma!                           \\
        $\vw_j$                                             & input weight                            & \verb!\bm{w}_j! & \verb!\vw_j! \\
        $a_j$                                               & output weight                           & \verb!a_j!                           \\
        $b_j$                                               & bias term                               & \verb!b_j!                           \\
        $f_{\vtheta}(\vx)$ or $f(\vx;\vtheta)$              & neural network                          & \verb!f_{\bm{\theta}}! & \verb!f_{\vtheta}! \\
        $\sum_{j=1}^{m} a_j \sigma (\vw_j\cdot \vx + b_j) $ & two-layer neural network                                                                    \\
        ${\rm VCdim}(\fH)$                                  & VC-dimension of  $\fH$                                                                      \\
        ${\rm Rad} (\fH\circ S)$, ${\rm Rad}_{S} (\fH)$     & Rademacher complexity of $\fH$ on $S$                                                       \\
        ${\rm Rad}_{n} (\fH)$                               & \begin{tabular}{@{}l @{}}Rademacher complexity\\over samples of size $n$\end{tabular}                                                                   \\
        GD                                                  & gradient descent                                                                            \\
        SGD                                                 & stochastic gradient descent                                                                 \\
        $B$                                                 & a batch set                             & \verb!B!                           \\
        $|B|$                                               & batch size                              & \verb!|B|!                           \\
        $\eta$                                              & learning rate                           & \verb!\eta!                           \\
        %$K_n$&Gram matrix&\verb!K_n!\\
        $\vk$                                               & discretized frequency                   & \verb!\bm{k}! & \verb!\vk! \\
        $\vxi$                                              & continuous frequency                    & \verb!\bm{\xi}! & \verb!\vxi! \\
        $*$                                                 & convolution operation                   & \verb!*!                           \\
        \bottomrule
    \end{tabular}
\end{center}

\newpage
\section{$L$-layer neural network}~\\
\begin{center}
    \begin{tabular}{llll}
        \toprule
        symbol                   & meaning                                                                                     & \LaTeX                  & simplified              \\
        \midrule
        $d$                      & input dimension                                                                             & \verb!d! &                         \\
        $d_{\rm o}$              & output dimension                                                                            & \verb!d_{\rm o}! &                         \\
        $m_l$                    & the number of neurons in the $l$th layer, $m_0=d$, $m_{L} = d_{\rm o}$                     & \verb!m_l!                           \\
        $\mW^{[l]}$              & the $l$th layer weight                                                                      & \verb!\bm{W}^{[l]}! & \verb!\mW^{[l]}! \\
        $\vb^{[l]}$              & the $l$th layer bias term                                                                   & \verb!\bm{b}^{[l]}! & \verb!\vb^{[l]}! \\
        $\circ$                  & entry-wise operation                                                                        & \verb!\circ!                           \\
        $\sigma:\sR\to\sR$       & activation function                                                                         & \verb!\sigma!                           \\
        $\vtheta$                & $=(\mW^{[0]},\ldots,\mW^{[L-1]},\vb^{[0]},\ldots,\vb^{[L-1]})$,  parameters                 & \verb!\bm{\theta}! & \verb!\vtheta! \\
        $f_{\vtheta}^{[0]}(\vx)$ & $=\vx$                                                                                                                                          \\
        $f_{\vtheta}^{[l]}(\vx)$ & $=\sigma\circ(\mW^{[l-1]} f_{\vtheta}^{[l-1]}(\vx) + \vb^{[l-1]})$,  $l$-th  layer output                                                       \\
        $f_{\vtheta}(\vx)$       & $=f_{\vtheta}^{[L]}(\vx)=\mW^{[L-1]} f_{\vtheta}^{[L-1]}(\vx) + \vb^{[L-1]}$,  $L$-layer NN                                                     \\
        \bottomrule
    \end{tabular}
\end{center}

\newpage
\section{Acknowledgements}
Chenglong Bao (Tsinghua), Zhengdao Chen (NYU), Bin Dong (Peking), Weinan E (Princeton),  Quanquan Gu (UCLA), Kaizhu Huang (XJTLU), Shi Jin (SJTU), Jian Li (Tsinghua), Lei Li (SJTU), Tiejun Li (Peking),   Zhenguo Li (Huawei), Zhemin Li (NUDT), Shaobo Lin (XJTU), Ziqi Liu (CSRC),  Zichao Long (Peking), Chao Ma (Princeton),  Chao Ma (SJTU), Yuheng Ma (WHU),    Dengyu Meng (XJTU), Wang Miao (Peking),  Pingbing Ming (CAS), Zuoqiang Shi (Tsinghua), Jihong Wang (CSRC), Liwei Wang (Peking), Bican Xia (Peking), Zhouwang Yang (USTC),  Haijun Yu (CAS),  Yang Yuan  (Tsinghua),  Cheng Zhang (Peking),  Lulu Zhang (SJTU), Jiwei Zhang  (WHU),   Pingwen Zhang (Peking), Xiaoqun Zhang (SJTU),  Chengchao Zhao (CSRC), Zhanxing Zhu (Peking), Chuan Zhou (CAS),  Xiang Zhou (cityU).







\end{document}