path: root/texmf-dist/doc/latex/siam/soda209.all
Diffstat (limited to 'texmf-dist/doc/latex/siam/soda209.all')
-rw-r--r--  texmf-dist/doc/latex/siam/soda209.all  682
1 file changed, 682 insertions, 0 deletions
diff --git a/texmf-dist/doc/latex/siam/soda209.all b/texmf-dist/doc/latex/siam/soda209.all
new file mode 100644
index 00000000..862535cc
--- /dev/null
+++ b/texmf-dist/doc/latex/siam/soda209.all
@@ -0,0 +1,682 @@
+%% This is soda209.all. This file is to be used for creating a paper
+%% in the ACM/SIAM Preprint series with LaTeX. It consists of the following
+%% two files:
+%%
+%% ltexprt.tex ---- an example and documentation file
+%% ltexprt.sty ---- the macro file
+%%
+%% To use, cut this file apart at the appropriate places. You can run the
+%% example file with the macros to get sample output.
+%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%% CUT HERE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%% ltexprt.tex %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+% This is ltexprt.tex, an example file for use with the SIAM LaTeX (version 2.09)
+% Preprint Series macros. It is designed to provide double-column output.
+% Please take the time to read the following comments, as they document
+% how to use these macros. This file can be composed and printed out for
+% use as sample output.
+
+% Any comments or questions regarding these macros should be directed to:
+%
+% Corey Gray
+% SIAM
+% 3600 University City Science Center
+% Philadelphia, PA 19104-2688
+% USA
+% Telephone: (215) 382-9800
+% Fax: (215) 386-7999
+% e-mail: gray@siam.org
+
+
+% This file is to be used as an example for style only. It should not be read
+% for content.
+
+%%%%%%%%%%%%%%% PLEASE NOTE THE FOLLOWING STYLE RESTRICTIONS %%%%%%%%%%%%%%%
+
+%% 1. There are no new tags. Existing LaTeX tags have been formatted to match
+%% the Preprint series style.
+%%
+%% 2. You must use \cite in the text to mark your reference citations and
+%% \bibitem in the listing of references at the end of your chapter. See
+%% the examples in the following file. If you are using BibTeX, please
+%% supply the bst file with the manuscript file.
+%%
+%%
+%% 3. This macro is set up for two levels of headings (\section and
+%% \subsection). The macro will automatically number the headings for you.
+%%
+%% 4. No running heads are to be used in this volume.
+%%
+%% 5. Theorems, Lemmas, Definitions, etc. are to be double numbered,
+%% indicating the section and the occurrence of that element
+%% within that section. (For example, the first theorem in the second
+%% section would be numbered 2.1.) The macro will
+%% automatically do the numbering for you. A brief commented usage
+%% example follows these notes.
+%%
+%% 6. Figures, equations, and tables must be single-numbered.
+%% Use existing LaTeX tags for these elements.
+%% Numbering will be done automatically.
+%%
+%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
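+%% The fragment below is an illustrative sketch only (not part of the
+%% original sample text); it shows how the restrictions above look in
+%% practice, using the BANKSMITH key from the sample bibliography:
+%%
+%%   \section{Preliminaries.}
+%%   \subsection{Notation.}
+%%   As shown in \cite{BANKSMITH}, the factorization exists.
+%%   \begin{theorem} ... \end{theorem}   % double numbered, e.g. 1.1
+%%   \begin{equation} A x = b \end{equation}
+%%
+%%   \begin{thebibliography}{99}
+%%   \bibitem{BANKSMITH} R.~E. Bank and R.~K. Smith, {\em General sparse
+%%    elimination requires no permanent integer storage}.
+%%   \end{thebibliography}
+%%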
+
+
+
+\documentstyle[twoside,leqno,twocolumn,ltexprt]{article}
+
+\begin{document}
+
+
+\title{\Large SIAM/ACM Preprint Series Macros for
+Use With LaTeX\thanks{Supported by GSF grants ABC123, DEF456, and GHI789.}}
+\author{Corey Gray\thanks{Society for Industrial and Applied Mathematics.} \\
+\and
+Tricia Manning\thanks{Society for Industrial and Applied Mathematics.}}
+\date{}
+
+\maketitle
+
+\pagestyle{myheadings}
+\markboth{}{}
+
+%\pagenumbering{arabic}
+
+
+\begin{abstract} \small\baselineskip=9pt This is the text of my abstract. It is a brief
+description of my
+paper, outlining the purposes and goals I am trying to address.\end{abstract}
+
+\section{Problem Specification.}In this paper, we consider the solution of the $N \times
+N$ linear
+system
+\begin{equation} \label{e1.1}
+A x = b
+\end{equation}
+where $A$ is large, sparse, symmetric, and positive definite. We consider
+the direct solution of (\ref{e1.1}) by means of general sparse Gaussian
+elimination. In such a procedure, we find a permutation matrix $P$, and
+compute the decomposition
+\[
+P A P^{t} = L D L^{t}
+\]
+where $L$ is unit lower triangular and $D$ is diagonal.
+
+
+\section{Design Considerations.}Several good ordering algorithms (nested dissection and
+minimum degree)
+are available for computing $P$ \cite{GEORGELIU}, \cite{ROSE72}.
+Since our interest here does not
+focus directly on the ordering, we assume for convenience that $P=I$,
+or that $A$ has been preordered to reflect an appropriate choice of $P$.
+
+Our purpose here is to examine the nonnumerical complexity of the
+sparse elimination algorithm given in \cite{BANKSMITH}.
+As was shown there, a general sparse elimination scheme based on the
+bordering algorithm requires less storage for pointers and
+row/column indices than more traditional implementations of general
+sparse elimination. This is accomplished by exploiting the m-tree,
+a particular spanning tree for the graph of the filled-in matrix.
+
+\begin{theorem} The method was extended to three
+dimensions. For the standard multigrid
+coarsening
+(in which, for a given grid, the next coarser grid has $1/8$
+as many points), anisotropic problems require plane
+relaxation to
+obtain a good smoothing factor.\end{theorem}
+
+Our purpose here is to examine the nonnumerical complexity of the
+sparse elimination algorithm given in \cite{BANKSMITH}.
+As was shown there, a general sparse elimination scheme based on the
+bordering algorithm requires less storage for pointers and
+row/column indices than more traditional implementations of general
+sparse elimination. This is accomplished by exploiting the m-tree,
+a particular spanning tree for the graph of the filled-in matrix.
+Several good ordering algorithms (nested dissection and minimum degree)
+are available for computing $P$ \cite{GEORGELIU}, \cite{ROSE72}.
+Since our interest here does not
+focus directly on the ordering, we assume for convenience that $P=I$,
+or that $A$ has been preordered to reflect an appropriate choice of $P$.
+
+\begin{proof} In this paper we consider two methods. The first method
+is
+basically the method considered with two differences:
+first, we perform plane relaxation by a two-dimensional
+multigrid method, and second, we use a slightly different
+choice of
+interpolation operator, which improves performance
+for nearly singular problems. In the second method coarsening
+is done by successively coarsening in each of the three
+independent variables and then ignoring the intermediate
+grids; this artifice simplifies coding considerably.
+\end{proof}
+
+Our purpose here is to examine the nonnumerical complexity of the
+sparse elimination algorithm given in \cite{BANKSMITH}.
+As was shown there, a general sparse elimination scheme based on the
+bordering algorithm requires less storage for pointers and
+row/column indices than more traditional implementations of general
+sparse elimination. This is accomplished by exploiting the m-tree,
+a particular spanning tree for the graph of the filled-in matrix.
+
+\begin{Definition}{\rm We describe the two methods in \S 1.2. In \S 1.3 we
+discuss some remaining details.}
+\end{Definition}
+
+Our purpose here is to examine the nonnumerical complexity of the
+sparse elimination algorithm given in \cite{BANKSMITH}.
+As was shown there, a general sparse elimination scheme based on the
+bordering algorithm requires less storage for pointers and
+row/column indices than more traditional implementations of general
+sparse elimination. This is accomplished by exploiting the m-tree,
+a particular spanning tree for the graph of the filled-in matrix.
+Several good ordering algorithms (nested dissection and minimum degree)
+are available for computing $P$ \cite{GEORGELIU}, \cite{ROSE72}.
+Since our interest here does not
+focus directly on the ordering, we assume for convenience that $P=I$,
+or that $A$ has been preordered to reflect an appropriate choice of $P$.
+
+Our purpose here is to examine the nonnumerical complexity of the
+sparse elimination algorithm given in \cite{BANKSMITH}.
+As was shown there, a general sparse elimination scheme based on the
+bordering algorithm requires less storage for pointers and
+row/column indices than more traditional implementations of general
+sparse elimination.
+
+\begin{lemma} We discuss first the choice for $I_{k-1}^k$
+which is a generalization. We assume that $G^{k-1}$ is
+obtained
+from $G^k$
+by standard coarsening; that is, if $G^k$ is a tensor product
+grid $G_{x}^k \times G_{y}^k \times G_{z}^k$,
+$G^{k-1}=G_{x}^{k-1} \times G_{y}^{k-1} \times G_{z}^{k-1}$,
+where $G_{x}^{k-1}$ is obtained by deleting every other grid
+point of $G_x^k$ and similarly for $G_{y}^k$ and $G_{z}^k$.
+\end{lemma}
+
+To our knowledge, the m-tree previously has not been applied in this
+fashion to the numerical factorization, but it has been used,
+directly or indirectly, in several optimal order algorithms for
+computing the fill-in during the symbolic factorization phase
+[4] - [10], [5], [6]. In \S 1.3., we analyze the complexity of the old and new
+approaches to the intersection problem for the special case of
+an $n \times n$ grid ordered by nested dissection. The special
+structure of this problem allows us to make exact estimates of
+the complexity. To our knowledge, the m-tree previously has not been applied in this
+fashion to the numerical factorization, but it has been used,
+directly or indirectly, in several optimal order algorithms for
+computing the fill-in during the symbolic factorization phase
+[4] - [10], [5], [6].
+
+In \S 1.2, we review the bordering algorithm, and introduce
+the sorting and intersection problems that arise in the
+sparse formulation of the algorithm.
+In \S 1.3., we analyze the complexity of the old and new
+approaches to the intersection problem for the special case of
+an $n \times n$ grid ordered by nested dissection. The special
+structure of this problem allows us to make exact estimates of
+the complexity. To our knowledge, the m-tree previously has not been applied in this
+fashion to the numerical factorization, but it has been used,
+directly or indirectly, in several optimal order algorithms for
+computing the fill-in during the symbolic factorization phase
+[4] - [10], [5], [6].
+
+
+For the old approach, we show that the
+complexity of the intersection problem is $O(n^{3})$, the same
+as the complexity of the numerical computations. For the
+new approach, the complexity of the second part is reduced to
+$O(n^{2} (\log n)^{2})$.
+
+To our knowledge, the m-tree previously has not been applied in this
+fashion to the numerical factorization, but it has been used,
+directly or indirectly, in several optimal order algorithms for
+computing the fill-in during the symbolic factorization phase
+[4] - [10], [5], [6]. In \S 1.3., we analyze the complexity of the old and new
+approaches to the intersection problem for the special case of
+an $n \times n$ grid ordered by nested dissection. The special
+structure of this problem allows us to make exact estimates of
+the complexity. To our knowledge, the m-tree previously has not been applied in this
+fashion to the numerical factorization, but it has been used,
+directly or indirectly, in several optimal order algorithms for
+computing the fill-in during the symbolic factorization phase
+[4] - [10], [5], [6].
+This is accomplished by exploiting the m-tree,
+a particular spanning tree for the graph of the filled-in matrix.
+To our knowledge, the m-tree previously has not been applied in this
+fashion to the numerical factorization, but it has been used,
+directly or indirectly, in several optimal order algorithms for
+computing the fill-in during the symbolic factorization phase
+\cite{EISENSTAT} - \cite{LIU2}, \cite{ROSE76}, \cite{SCHREIBER}.
+
+\subsection{Robustness.}\ We do not
+attempt to present an overview
+here, but rather attempt to focus on those results that
+are relevant to our particular algorithm.
+This section assumes prior knowledge of the role of graph theory
+in sparse Gaussian elimination; surveys of this role are
+available in \cite{ROSE72} and \cite{GEORGELIU}. More general
+discussions of elimination trees are given in
+\cite{LAW} - \cite{LIU2}, \cite{SCHREIBER}.
+Thus, at the $k$th stage, the bordering algorithm consists of
+solving the lower triangular system
+\begin{equation} \label{1.2}
+ L_{k-1}v = c
+\end{equation}
+and setting
+\begin{eqnarray}
+\ell &=& D^{-1}_{k-1}v , \\
+\delta &=& \alpha - \ell^{t} v .
+\end{eqnarray}
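+
+% Editorial sketch (not part of the original sample text): the equations
+% above arise from partitioning the leading $k \times k$ principal
+% submatrix and its factors as
+%
+% \[
+% A_k = \left( \begin{array}{cc} A_{k-1} & c \\ c^{t} & \alpha \end{array} \right) ,
+% \quad
+% L_k = \left( \begin{array}{cc} L_{k-1} & 0 \\ \ell^{t} & 1 \end{array} \right) ,
+% \quad
+% D_k = \left( \begin{array}{cc} D_{k-1} & 0 \\ 0 & \delta \end{array} \right) ,
+% \]
+%
+% so that $A_k = L_k D_k L_k^{t}$ requires $L_{k-1} D_{k-1} \ell = c$
+% (solved as $L_{k-1} v = c$ with $v = D_{k-1} \ell$) and
+% $\delta = \alpha - \ell^{t} D_{k-1} \ell = \alpha - \ell^{t} v$.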
+
+\begin{figure}
+\vspace{14pc}
+\caption{This is Figure 1.1.}
+\end{figure}
+
+\section{Robustness.} We do not
+attempt to present an overview
+here, but rather attempt to focus on those results that
+are relevant to our particular algorithm.
+
+\subsection{Versatility.}\ The special
+structure of this problem allows us to make exact estimates of
+the complexity. For the old approach, we show that the
+complexity of the intersection problem is $O(n^{3})$, the same
+as the complexity of the numerical computations
+\cite{GEORGELIU}, \cite{ROSEWHITTEN}. For the
+new approach, the complexity of the second part is reduced to
+$O(n^{2} (\log n)^{2})$.
+
+To our knowledge, the m-tree previously has not been applied in this
+fashion to the numerical factorization, but it has been used,
+directly or indirectly, in several optimal order algorithms for
+computing the fill-in during the symbolic factorization phase
+[4] - [10], [5], [6]. In \S 1.3., we analyze the complexity of the old and new
+approaches to the intersection problem for the special case of
+an $n \times n$ grid ordered by nested dissection. The special
+structure of this problem allows us to make exact estimates of
+the complexity. To our knowledge, the m-tree previously has not been applied in this
+fashion to the numerical factorization, but it has been used,
+directly or indirectly, in several optimal order algorithms for
+computing the fill-in during the symbolic factorization phase
+[4] - [10], [5], [6].
+
+In \S 1.2, we review the bordering algorithm, and introduce
+the sorting and intersection problems that arise in the
+sparse formulation of the algorithm.
+In \S 1.3., we analyze the complexity of the old and new
+approaches to the intersection problem for the special case of
+an $n \times n$ grid ordered by nested dissection. The special
+structure of this problem allows us to make exact estimates of
+the complexity. To our knowledge, the m-tree previously has not been applied in this
+fashion to the numerical factorization, but it has been used,
+directly or indirectly, in several optimal order algorithms for
+computing the fill-in during the symbolic factorization phase
+[4] - [10], [5], [6].
+
+
+For the old approach, we show that the
+complexity of the intersection problem is $O(n^{3})$, the same
+as the complexity of the numerical computations. For the
+new approach, the complexity of the second part is reduced to
+$O(n^{2} (\log n)^{2})$.
+
+To our knowledge, the m-tree previously has not been applied in this
+fashion to the numerical factorization, but it has been used,
+directly or indirectly, in several optimal order algorithms for
+computing the fill-in during the symbolic factorization phase
+[4] - [10], [5], [6]. In \S 1.3., we analyze the complexity of the old and new
+approaches to the intersection problem for the special case of
+an $n \times n$ grid ordered by nested dissection. The special
+structure of this problem allows us to make exact estimates of
+the complexity. To our knowledge, the m-tree previously has not been applied in this
+fashion to the numerical factorization, but it has been used,
+directly or indirectly, in several optimal order algorithms for
+computing the fill-in during the symbolic factorization phase
+[4] - [10], [5], [6].
+This is accomplished by exploiting the m-tree,
+a particular spanning tree for the graph of the filled-in matrix.
+To our knowledge, the m-tree previously has not been applied in this
+fashion to the numerical factorization, but it has been used,
+directly or indirectly, in several optimal order algorithms for
+computing the fill-in during the symbolic factorization phase
+\cite{EISENSTAT} - \cite{LIU2}, \cite{ROSE76}, \cite{SCHREIBER}.
+
+\begin{thebibliography}{99}
+
+%\bibitem{GUIDE}
+%R.~E. Bank, {\em PLTMG users' guide, edition 5.0}, tech. report,
+% Department of Mathematics, University of California, San Diego, CA, 1988.
+
+%\bibitem{HBMG}
+%R.~E. Bank, T.~F. Dupont, and H.~Yserentant, {\em The hierarchical basis
+% multigrid method}, Numer. Math., 52 (1988), pp.~427--458.
+
+\bibitem{BANKSMITH}
+R.~E. Bank and R.~K. Smith, {\em General sparse elimination requires no
+ permanent integer storage}, SIAM J. Sci. Stat. Comput., 8 (1987),
+ pp.~574--584.
+
+\bibitem{EISENSTAT}
+S.~C. Eisenstat, M.~C. Gursky, M.~Schultz, and A.~Sherman, {\em
+ Algorithms and data structures for sparse symmetric Gaussian elimination},
+ SIAM J. Sci. Stat. Comput., 2 (1982), pp.~225--237.
+
+\bibitem{GEORGELIU}
+A.~George and J.~Liu, {\em Computer Solution of Large Sparse Positive
+ Definite Systems}, Prentice Hall, Englewood Cliffs, NJ, 1981.
+
+\bibitem{LAW}
+K.~H. Law and S.~J. Fenves, {\em A node addition model for symbolic
+ factorization}, ACM TOMS, 12 (1986), pp.~37--50.
+
+\bibitem{LIU}
+J.~W.~H. Liu, {\em A compact row storage scheme for Cholesky factors
+ using elimination trees}, ACM TOMS, 12 (1986), pp.~127--148.
+
+\bibitem{LIU2}
+\sameauthor , {\em The role of
+ elimination trees in sparse factorization}, Tech. Report CS-87-12, Department
+ of Computer Science, York University, Ontario, Canada, 1987.
+
+\bibitem{ROSE72}
+D.~J. Rose, {\em A graph theoretic study of the numeric solution of
+ sparse positive definite systems}, in Graph Theory and Computing, Academic Press, New
+York, 1972.
+
+\bibitem{ROSE76}
+D.~J. Rose, R.~E. Tarjan, and G.~S. Lueker, {\em Algorithmic aspects of
+ vertex elimination on graphs}, SIAM J. Comput., 5 (1976), pp.~226--283.
+
+\bibitem{ROSEWHITTEN}
+D.~J. Rose and G.~F. Whitten, {\em A recursive analysis of dissection
+ strategies}, in Sparse Matrix Computations, Academic Press, New York, 1976.
+
+\bibitem{SCHREIBER}
+R.~Schreiber, {\em A new implementation of sparse Gaussian elimination},
+ ACM TOMS, 8 (1982), pp.~256--276.
+
+\end{thebibliography}
+\end{document}
+
+% End of ltexprt.tex
+%
+%
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% CUT HERE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+%
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ltexprt.sty %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+% This is ltexprt.sty, a file of macros and definitions for creating a
+% chapter for publication in the ACM/SIAM Preprint series using LaTeX (version 2.09).
+% It is designed to produce double-column output.
+% This file may be freely distributed but may not be altered in any way.
+% Any comments or questions regarding these macros should be directed to:
+
+% Corey Gray
+% SIAM
+% 3600 University City Science Center
+% Philadelphia, PA 19104-2688
+% USA
+% Telephone: (215) 382-9800
+% Fax: (215) 386-7999
+% e-mail: gray@siam.org
+
+
+% Report the version.
+\message{*** ACM/SIAM LaTeX 2.09 Preprint Series macro package, version 1.0,
+September 24, 1990 ***}
+
+
+\pretolerance=800
+\tolerance=10000
+\sloppy
+
+\voffset=-.5in
+\hoffset=-.5in
+\vsize=55pc
+\hsize=41pc
+\baselineskip=14pt
+\footskip=18pt
+\topmargin 24pt
+\headheight 12pt
+\headsep 17pt
+\textheight 52.5pc \advance\textheight by \topskip
+\textwidth 41pc
+\parskip 0pt
+\parindent 18pt
+
+\font\tensmc=cmcsc10
+\def\smc{\tensmc}
+
+%% footnotes to be set 8/10
+\def\footnotesize{\@setsize\footnotesize{10pt}\viiipt\@viiipt
+ % \indent
+ \abovedisplayskip \z@
+ \belowdisplayskip\z@
+ \abovedisplayshortskip\abovedisplayskip
+ \belowdisplayshortskip\belowdisplayskip
+ \def\@listi{\leftmargin\leftmargini \topsep 3pt plus 1pt minus 1pt
+ \parsep 2pt plus 1pt minus 1pt
+ \itemsep \parsep}}
+
+\let\referencesize\footnotesize
+
+\footnotesep 0pt
+
+\skip\footins 12pt plus 12pt
+
+\def\footnoterule{\kern3\p@ \hrule width 3em} % the \hrule is .4pt high
+
+\def\ps@plain{\let\@mkboth\@gobbletwo
+ \def\@oddfoot{{\hfil\small\thepage\hfil}}%
+ \def\@oddhead{}
+ \def\@evenhead{}\def\@evenfoot{}}
+
+
+
+
+
+\def\ps@headings{\let\@mkboth\markboth
+ \def\@oddfoot{}\def\@evenfoot{}%
+ \def\@evenhead{{\rm\thepage}\hfil{\small\leftmark}}%
+ \def\@oddhead{{\noindent\small\rightmark}\hfil{\rm\thepage}}}
+
+
+
+\def\ps@myheadings{\let\@mkboth\@gobbletwo
+ \def\@oddfoot{}\def\@evenfoot{}%
+ \def\@oddhead{\rlap{\normalsize\rm\rightmark}\hfil{\small\thepage}}%
+ \def\@evenhead{%{\hfil{\small\@chapapp}\
+ {\small\thepage}\hfil\llap{\normalsize\rm\leftmark}}%
+ \def\chaptermark##1{}%
+ \def\sectionmark##1{}\def\subsectionmark##1{}}
+
+
+\def\theequation{\arabic{section}.\arabic{equation}}
+
+
+\def\section{\@startsection{section}{1}{0pt}{-12pt}{3pt}{\hyphenpenalty=\@M
+\exhyphenpenalty=\@M\normalsize\bf}}
+\def\subsection{\@startsection{subsection}{2}{0pt}{-12pt}{0pt}{\normalsize\bf}
+}
+\def\subsubsection{\@startsection
+ {subsubsection}{3}{0pt}{-12pt}{0pt}{\normalsize\bf}}
+\def\paragraph{\@startsection
+ {paragraph}{4}{\parindent}{0pt}{0pt}{\normalsize\bf}}
+\def\subparagraph{\@startsection
+ {subparagraph}{4}{\parindent}{0pt}{0pt}{\normalsize\bf}}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% %
+% THEOREMS, PROOFS, ALGORITHMS %
+% %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%% defined proof environment by theorem model (took out counter)
+
+\def\newproof#1{\@nprf{#1}}
+
+\def\@nprf#1#2{\@xnprf{#1}{#2}}
+
+\def\@xnprf#1#2{\expandafter\@ifdefinable\csname #1\endcsname
+\global\@namedef{#1}{\@prf{#1}{#2}}\global\@namedef{end#1}{\@endproof}}
+
+\def\@prf#1#2{\@xprf{#1}{#2}}
+
+\def\@xprf#1#2{\@beginproof{#2}{\csname the#1\endcsname}\ignorespaces}
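+
+%%% Usage sketch (illustrative only): a declaration such as
+%%%   \newproof{remark}{Remark}
+%%% defines an unnumbered environment, so that
+%%%   \begin{remark} ... \end{remark}
+%%% is set by \@beginproof with the italic run-in label ``Remark.''  The
+%%% package uses this mechanism below for proof, Example, Method, and
+%%% Exercise.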
+
+
+
+%%% defined algorithm environment by theorem model
+
+\def\newalgorithm#1{\@ifnextchar[{\@oalg{#1}}{\@nalg{#1}}}
+
+\def\@nalg#1#2{%
+\@ifnextchar[{\@xnalg{#1}{#2}}{\@ynalg{#1}{#2}}}
+
+\def\@xnalg#1#2[#3]{\expandafter\@ifdefinable\csname #1\endcsname
+{\@definecounter{#1}\@addtoreset{#1}{#3}%
+\expandafter\xdef\csname the#1\endcsname{\expandafter\noexpand
+ \csname the#3\endcsname \@thmcountersep \@thmcounter{#1}}%
+\global\@namedef{#1}{\@alg{#1}{#2}}\global\@namedef{end#1}{\@endalgorithm}}}
+
+\def\@ynalg#1#2{\expandafter\@ifdefinable\csname #1\endcsname
+{\@definecounter{#1}%
+\expandafter\xdef\csname the#1\endcsname{\@thmcounter{#1}}%
+\global\@namedef{#1}{\@alg{#1}{#2}}\global\@namedef{end#1}{\@endalgorithm}}}
+
+\def\@oalg#1[#2]#3{\expandafter\@ifdefinable\csname #1\endcsname
+ {\global\@namedef{the#1}{\@nameuse{the#2}}%
+\global\@namedef{#1}{\@alg{#2}{#3}}%
+\global\@namedef{end#1}{\@endalgorithm}}}
+
+\def\@alg#1#2{\refstepcounter
+ {#1}\@ifnextchar[{\@yalg{#1}{#2}}{\@xalg{#1}{#2}}}
+
+\def\@xalg#1#2{\@beginalgorithm{#2}{\csname the#1\endcsname}\ignorespaces}
+\def\@yalg#1#2[#3]{\@opargbeginalgorithm{#2}{\csname
+ the#1\endcsname}{#3}\ignorespaces}
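+
+%%% Usage sketch (illustrative only; mirrors \newtheorem's calling sequence):
+%%%   \newalgorithm{alg}{Algorithm}[section]   % numbered per section, e.g. Algorithm 2.1
+%%%   \newalgorithm{variant}[alg]{Variant}     % shares alg's counter
+%%%   \begin{alg}[Bordering step] ... \end{alg} % optional bracketed note in the label
+%%% The package itself declares \newalgorithm{@algorithm}{Algorithm}[section]
+%%% below.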
+
+
+
+
+\def\@beginproof#1{\rm \trivlist \item[\hskip \labelsep{\it #1.\/}]}
+\def\@endproof{\outerparskip 0pt\endtrivlist}
+
+\def\@begintheorem#1#2{\it \trivlist \item[\hskip \labelsep{\sc #1\ #2.}]}
+\def\@opargbegintheorem#1#2#3{\it \trivlist
+ \item[\hskip \labelsep{\sc #1\ #2.\ (#3)}]}
+\def\@endtheorem{\outerparskip 0pt\endtrivlist}
+
+%\def\@begindefinition#1#2{\rm \trivlist \item[\hskip \labelsep{\sc #1\ #2.}]}
+%\def\@opargbegindefinition#1#2#3{\rm \trivlist
+% \item[\hskip \labelsep{\sc #1\ #2.\ (#3)}]}
+%\def\@enddefinition{\outerparskip 0pt\endtrivlist}
+
+
+\def\@beginalgorithm#1#2{\rm \trivlist \item[\hskip \labelsep{\sc #1\ #2.}]}
+\def\@opargbeginalgorithm#1#2#3{\rm \trivlist
+ \item[\hskip \labelsep{\sc #1\ #2.\ (#3)}]}
+\def\@endalgorithm{\outerparskip 6pt\endtrivlist}
+
+
+\newskip\outerparskip
+\newskip\@outerparskip % allocate \@outerparskip, used by \@trivlist and \endtrivlist below
+
+\def\trivlist{\parsep\outerparskip
+ \@trivlist \labelwidth\z@ \leftmargin\z@
+ \itemindent\parindent \def\makelabel##1{##1}}
+
+\def\@trivlist{\topsep=0pt\@topsepadd\topsep
+ \if@noskipsec \leavevmode \fi
+ \ifvmode \advance\@topsepadd\partopsep \else \unskip\par\fi
+ \if@inlabel \@noparitemtrue \@noparlisttrue
+ \else \@noparlistfalse \@topsep\@topsepadd \fi
+ \advance\@topsep \parskip
+ \leftskip\z@\rightskip\@rightskip \parfillskip\@flushglue
+ \@setpar{\if@newlist\else{\@@par}\fi}%
+ \global\@newlisttrue \@outerparskip\parskip}
+
+
+\def\endtrivlist{\if@newlist\@noitemerr\fi
+ \if@inlabel\indent\fi
+ \ifhmode\unskip \par\fi
+ \if@noparlist \else
+ \ifdim\lastskip >\z@ \@tempskipa\lastskip \vskip -\lastskip
+ \advance\@tempskipa\parskip \advance\@tempskipa -\@outerparskip
+ \vskip\@tempskipa
+ \fi\@endparenv\fi
+ \vskip\outerparskip}
+
+
+
+ \newproof{@proof}{Proof}
+ \newenvironment{proof}{\begin{@proof}}{\end{@proof}}
+
+ \newtheorem{@theorem}{Theorem}[section]
+ \newenvironment{theorem}{\begin{@theorem}}{\end{@theorem}}
+
+ \newalgorithm{@algorithm}{Algorithm}[section]
+ \newenvironment{algorithm}{\begin{@algorithm}}{\end{@algorithm}}
+
+
+
+\newtheorem{lemma}{Lemma}[section]
+\newtheorem{fact}{Fact}[section]
+\newtheorem{corollary}{Corollary}[section]
+\newtheorem{axiom}{Axiom}[section]
+\newtheorem{cond}{Condition}[section]
+\newtheorem{property}{Property}[section]
+\newtheorem{proposition}{Proposition}[section]
+
+\newtheorem{Conjecture}{Conjecture}[section]
+%\newtheorem{Corollary}[Theorem]{Corollary}
+\newtheorem{Definition}{Definition}[section]
+\newtheorem{Lemma}{Lemma}[section]
+\newtheorem{Remark}{Remark}[section]
+
+\newproof{Example}{Example}
+\newproof{Method}{Method}
+\newproof{Exercise}{Exercise}
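+
+% For reference, an illustrative sketch (not part of the macros) of how a
+% manuscript uses the environments declared above:
+%   \begin{lemma} $L_{k-1}$ is unit lower triangular. \end{lemma}
+%   \begin{proof} ... \end{proof}
+%   \begin{Definition} ... \end{Definition}
+%   \begin{Example} ... \end{Example}
+% lemma and Definition are numbered by section (e.g. 2.1); proof and Example
+% are unnumbered, via \newproof.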
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% %%
+%% BIBLIOGRAPHY %%
+%% %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+\def\thebibliography#1{%
+%\cleardoublepage
+\parindent 0em
+\vspace{6pt}
+\begin{flushleft}\normalsize\bf References\end{flushleft}
+\addvspace{3pt}\nopagebreak\list
+ %% default is no labels, for those not using \cite or BibTeX
+% {[\arabic{enumi}]} {\settowidth\labelwidth{[#1]}
+{[\arabic{enumi}]}{\settowidth\labelwidth{mm}
+\leftmargin\labelwidth
+ \advance\leftmargin\labelsep
+ \usecounter{enumi}\@bibsetup}
+\def\newblock{\hskip .11em plus .33em minus -.07em}
+ \sloppy\clubpenalty4000\widowpenalty4000
+ \sfcode`\.=1000\relax}
+
+%% setup 8/10 type
+\def\@bibsetup{\itemindent=0pt \itemsep=0pt \parsep=0pt
+\small}
+
+\def\sameauthor{\leavevmode\vrule height 2pt depth -1.6pt width 23pt}
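+
+% (\sameauthor draws a short rule in place of a repeated author name; see the
+% LIU2 entry in the sample bibliography above.)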
+
+%
+%% End of ltexprt.sty
+%
+%%%%%%%%%%%%%%%%%%%%%%%%% End of soda209.all %%%%%%%%%%%%%%%%%%%%%%%