Skip to content

Commit

Permalink
Address some comments.
Browse files Browse the repository at this point in the history
  • Loading branch information
abergeron committed Oct 29, 2014
1 parent b3e89f7 commit 062412a
Show file tree
Hide file tree
Showing 6 changed files with 103 additions and 41 deletions.
11 changes: 3 additions & 8 deletions 05_tripleop/01_tripleop_soln.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from theano import Op, Apply
from theano.tensor import as_tensor_variable

class `TripleOp`(Op):
class TripleOp(Op):
__props__ = ()

def make_node(self, x):
Expand All @@ -11,15 +11,10 @@ def make_node(self, x):
def perform(self, node, inputs, output_storage):
x = inputs[0]
z = output_storage[0]
z[0] = x * `3`
z[0] = x * 3

def infer_shape(self, node, i0_shapes):
return i0_shapes

def grad(self, inputs, output_grads):
return [output_grads[0] * `3`]

def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return eval_points
return self.grad(inputs, eval_points)
return [output_grads[0] * 3]
14 changes: 14 additions & 0 deletions 06_scalmulop/01_scalmulop.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
from theano import Op, Apply
from theano.tensor import as_tensor_variable

class DoubleOp(Op):
    """Elementwise Op that returns its input multiplied by two."""

    __props__ = ()

    def make_node(self, x):
        # Accept anything convertible to a tensor variable; the single
        # output has the same type as the (converted) input.
        x = as_tensor_variable(x)
        return Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        # Python-mode computation on concrete values: write the doubled
        # input into the first (and only) output storage cell.
        (x,) = inputs
        (z,) = output_storage
        z[0] = x * 2
35 changes: 35 additions & 0 deletions 06_scalmulop/01_scalmulop_soln.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
from theano import Op, Apply
from theano.tensor import as_tensor_variable
from theano.scalar import as_scalar_variable

class ScalMulV1(Op):
    """Elementwise Op multiplying its input by a fixed integer constant.

    The constant is an Op property: two instances built with the same
    ``scal`` compare equal and hash alike via ``__props__``.
    """

    # BUG FIX: ('scal') is just the string 'scal' — parentheses alone do
    # not make a tuple. __props__ must be a *tuple* of attribute names
    # for Theano's property-based __eq__/__hash__ to work correctly.
    __props__ = ('scal',)

    def __init__(self, scal):
        # Only plain ints are accepted as the compile-time multiplier.
        if not isinstance(scal, int):
            raise TypeError('expected an int')
        self.scal = scal

    def make_node(self, x):
        # Accept anything convertible to a tensor variable; the single
        # output has the same type as the (converted) input.
        x = as_tensor_variable(x)
        return Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        # Python-mode computation: multiply the concrete input value by
        # the stored constant and place it in the output cell.
        x = inputs[0]
        z = output_storage[0]
        z[0] = x * self.scal


class ScalMulV2(Op):
    """Elementwise Op multiplying a tensor by a runtime scalar input.

    The multiplier is a symbolic input rather than a compile-time
    property, so ``__props__`` is empty.
    """

    __props__ = ()

    def make_node(self, x, scal):
        # Two symbolic inputs: a tensor and a scalar multiplier.
        # The single output has the same type as the tensor input.
        x = as_tensor_variable(x)
        scal = as_scalar_variable(scal)
        return Apply(self, [x, scal], [x.type()])

    def perform(self, node, inputs, output_storage):
        # Python-mode computation on the concrete values.
        tensor_val, mul_val = inputs
        out = output_storage[0]
        out[0] = tensor_val * mul_val
67 changes: 47 additions & 20 deletions advanced.tex
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@
tabsize=4,
backgroundcolor=\color{lightgray},
frame=single,
%showlines=true,
%emph={theano,MyOp,DoubleOp}, emphstyle=\color{lightblue}\bfseries,
emph={[2]__init__,make_node,perform,infer_shape,c_code,make_thunk,grad,R_op},emphstyle={[2]\color{methblue}},
emph={[3]self},emphstyle={[3]\color{darkgreen}},
Expand All @@ -48,6 +49,14 @@
\frame[plain]{\titlepage}

\section*{}

\begin{frame}{Setup}
\begin{enumerate}
\item Make sure you have Theano installed somewhere
\item Clone this repository: \url{https://github.com/abergeron/ccw_tutorial_theano.git}
\end{enumerate}
\end{frame}

\begin{frame}{Outline}
\begin{enumerate}
\item How to Make an Op (Python) (45 min)
Expand All @@ -59,6 +68,12 @@ \section*{}

\section{How to Make an Op (Python)}

\begin{frame}[plain]{}
\begin{center}
\Huge How to Make an Op (Python)
\end{center}
\end{frame}

\begin{frame}[fragile]{Overview}
\lstinputlisting[lastline=14]{python.py}
\end{frame}
Expand Down Expand Up @@ -152,8 +167,10 @@ \section{How to Make an Op (Python)}
\end{frame}

\begin{frame}{Exercise: ScalMulOp}
Now what would we need to change in the DoubleOp code to make it multiply by an arbitrary number?
\lstinputlisting[lastline=15]{doubleop.py}
\begin{center}
Work through the "06\_scalmulop" directory now.
It is available at \url{https://github.com/abergeron/ccw_tutorial_theano.git}.
\end{center}
\end{frame}

\begin{frame}{\code{infer_shape}}
Expand All @@ -170,7 +187,7 @@ \section{How to Make an Op (Python)}
\lstinputlisting[firstline=16,lastline=18]{doubleop.py}
\begin{itemize}
\item Here the code is really simple since we don't change the shape in any way in our Op
\item \code{input_shapes} would be \code{[x.shape]} or an expression equivalent to it
\item \code{input_shapes} would be an expression equivalent to \code{[x.shape]}
\end{itemize}
\end{frame}

Expand Down Expand Up @@ -203,18 +220,8 @@ \section{How to Make an Op (Python)}
It will compute the gradient numerically and symbolically (using our \code{grad()} method) and compare the two.
\end{frame}

%\subsection{\code{R_op}}

%\begin{frame}{Description}
%\end{frame}

%\begin{frame}{Example}
%\end{frame}

%\begin{frame}{Tests}
%\end{frame}

\begin{frame}{Add Special Methods to ScalMulOp}
\begin{frame}{Exercise: Add Special Methods to ScalMulOp}
Work through the "07\_scalmulgrad" directory
\begin{itemize}
\item Take the ScalMulOp class you made and add the \code{infer_shape} and \code{grad} methods to it.
\item Don't forget to make tests for your new class to make sure everything works correctly.
Expand All @@ -223,6 +230,12 @@ \section{How to Make an Op (Python)}

\section{How to Make an Op (C)}

\begin{frame}[plain]{}
\begin{center}
\Huge How to Make an Op (C)
\end{center}
\end{frame}

\begin{frame}{Overview}
\lstinputlisting{c.py}
\end{frame}
Expand Down Expand Up @@ -298,9 +311,10 @@ \section{How to Make an Op (C)}
\begin{frame}{Tests}
\begin{itemize}
\item Testing ops with C code is done the same way as testing for python ops
\item One thing to watch for is that tests for ops which don't have python code
\item One thing to watch for is tests for ops which don't have python code
\begin{itemize}
\item You should skip the test in those cases
\item Test for \code{theano.config.gxx == ""}
\end{itemize}
\item Using DebugMode will compare the output of the Python version to the output of the C version and raise an error if they don't match
\end{itemize}
Expand All @@ -324,7 +338,13 @@ \section{How to Make an Op (C)}
\end{itemize}
\end{frame}

\section{How to make a complex Op}
\section{How to Make a Complex Op}

\begin{frame}[plain]{}
\begin{center}
\Huge How to Make a Complex Op
\end{center}
\end{frame}

\begin{frame}{\code{make_thunk}}
\lstinputlisting[linerange={12-14}]{thunk.py}
Expand All @@ -342,6 +362,12 @@ \section{How to make a complex Op}
\section{Optimizations}
% How to integrate your op automatically

\begin{frame}[plain]{}
\begin{center}
\Huge Optimizations
\end{center}
\end{frame}

\begin{frame}{Purpose}
\begin{itemize}
\item End goal is to make code run faster
Expand All @@ -352,17 +378,18 @@ \section{Optimizations}

\begin{frame}{Replace an Op (V1)}
Here is code to use \code{DoubleOp} instead of \code{ScalMul(2)}.
\lstinputlisting[linerange={1-2,4-14}]{opt.py}
\lstinputlisting[linerange={1-5,9-15}]{opt.py}
\end{frame}

\begin{frame}{Replace an Op (V2)}
In this case since we are replacing one instance with another there is an easier way.
\lstinputlisting[linerange={1-2,14-17}]{opt.py}
\lstinputlisting[linerange={1-2,16-20}]{opt.py}
\end{frame}

\begin{frame}{Registering}
In any case you need to register your optimization.
\lstinputlisting[linerange={3-3,18-25}]{opt.py}
\lstinputlisting[linerange={6-10}]{opt.py}
\lstinputlisting[linerange={21-21}]{opt.py}
\end{frame}

\begin{frame}{Tests}
Expand Down
12 changes: 4 additions & 8 deletions opt.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
from scalmulop import ScalMulV1
from doubleop import DoubleOp
from theano.compile import optdb

from theano.gof import local_optimizer

from theano.tensor.opt import register_specialize

@register_specialize
@local_optimizer([ScalMulV1])
def local_scalmul_double_v1(node):
if not (isinstance(node.op, ScalMulV1)
Expand All @@ -16,10 +18,4 @@ def local_scalmul_double_v1(node):

local_scalmul_double_v2 = OpSub(ScalMulV1(2), DoubleOp())

optdb['specialize'].register(
# name of optimization (must be unique)
'local_scalmul_double_v2',
# optimization function
local_scalmul_double_v2,
# tags to activate/deactivate as a group
'fast_run')
register_specialize(local_scalmul_double_v2)
5 changes: 0 additions & 5 deletions tripleop.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,3 @@ def infer_shape(self, node, i0_shapes):

def grad(self, inputs, output_grads):
return [output_grads[0] * `3`]

def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return eval_points
return self.grad(inputs, eval_points)

0 comments on commit 062412a

Please sign in to comment.