generate lenses with hprotoc (version >= 2.1.7)
timjb committed Sep 17, 2015
1 parent 206df0b commit 85bb39b
Showing 8 changed files with 86 additions and 158 deletions.
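
The rename running through every file below is mechanical. The lenses previously bolted on by add_lenses.py carried a leading underscore (LP._top, _convolution_param), while hprotoc's own --lenses output (hprotoc >= 2.1.7) flips the convention: the generated record fields get the underscore prefix and the lenses drop it, so LP._top is now the field accessor and LP.top the lens. A minimal, self-contained sketch of that convention, with simplified stand-in types rather than hprotoc's actual output:

-- Illustrative only: simplified stand-ins for the generated message types.
import Control.Lens (Lens', lens, (&), (.~), (?~))

data LayerParameter = LayerParameter
  { _type'  :: Maybe String  -- record accessor keeps the underscore prefix
  , _bottom :: [String]
  } deriving Show

-- hprotoc --lenses also emits one lens per field, named without the
-- underscore, which is what the call sites below switch to:
type' :: Lens' LayerParameter (Maybe String)
type' = lens _type' (\lp x -> lp { _type' = x })

bottom :: Lens' LayerParameter [String]
bottom = lens _bottom (\lp x -> lp { _bottom = x })

main :: IO ()
main = print $ LayerParameter Nothing []
             & type'  ?~ "Convolution"
             & bottom .~ ["data"]
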
NN/Backend/Caffe.hs: 28 changes (14 additions, 14 deletions)
@@ -26,19 +26,19 @@ middleEnd :: Net -> Net
middleEnd = optimizeWith caffePasses

layerName :: LayerParameter -> Int -> Utf8
-layerName l i = printf "%s_%d" (type' l & fromJust & toString & map toLower) i & s
+layerName l i = printf "%s_%d" (_type' l & fromJust & toString & map toLower) i & s

backend :: Net -> NetParameter
-backend gr = def & _layer <>~ S.fromList (topsort' gr)
+backend gr = def & layer <>~ S.fromList (topsort' gr)

addLabels :: Pass
addLabels (_, _, lp) = lp & update (layerTy lp)
where
-- Data has labels going out
-update Data = LP._top <>~ S.singleton (s "label")
+update Data = LP.top <>~ S.singleton (s "label")
-- Criterion have labels coming in
-update SoftmaxWithLoss = LP._bottom <>~ S.singleton (s "label")
-update Accuracy = LP._bottom <>~ S.singleton (s "label")
+update SoftmaxWithLoss = LP.bottom <>~ S.singleton (s "label")
+update Accuracy = LP.bottom <>~ S.singleton (s "label")
-- Everything else can be ignored
update _ = id

@@ -52,8 +52,8 @@ optimizeInPlaceLayer layerTy' = [updateIfInPlace, updateIfParentInPlace] where
inPlaceParents gr i = filter inPlace . map fst $ pres gr i

updateIfInPlace (_, i, lp) =
-case (layerTy lp == layerTy', F.toList (top lp)) of
-(True, [_]) -> lp & LP._top .~ bottom lp
+case (layerTy lp == layerTy', F.toList (LP._top lp)) of
+(True, [_]) -> lp & LP.top .~ LP._bottom lp
(True, _) -> error $ printf "Can only have one output for an in-place layer" ++ show (layerName lp i)
(False, _) -> lp

@@ -70,11 +70,11 @@ optimizeInPlaceLayer layerTy' = [updateIfInPlace, updateIfParentInPlace] where
parents ->
-- TODO this is super dodgy and incorrect in the general
-- case (there are some weird invariants we rely on), but it works for now.
-if length parents /= (length . F.toList . bottom) lp
+if length parents /= (length . F.toList . LP._bottom) lp
then Left $ printf "Must have all parents in-place for in-place optimizations" ++ show (layerName lp i)
-else let parentTops = F.concatMap (F.toList . LP.top) parents in
-if length parentTops == length ((F.toList . LP.bottom) lp)
-then Right $ lp & LP._bottom .~ S.fromList parentTops
+else let parentTops = F.concatMap (F.toList . LP._top) parents in
+if length parentTops == length ((F.toList . LP._bottom) lp)
+then Right $ lp & LP.bottom .~ S.fromList parentTops
else Left $ error "asdf"

labelled :: Net -> [Node] -> [(LayerParameter, Node)]
@@ -85,6 +85,6 @@ pres gr j = labelled gr (G.pre gr j)

addConnection :: Pass
addConnection (gr, i, lp) = lp
-& LP._name ?~ layerName lp i
-& LP._bottom .~ S.fromList (map (uncurry layerName) (pres gr i))
-& LP._top <>~ S.singleton (layerName lp i)
+& LP.name ?~ layerName lp i
+& LP.bottom .~ S.fromList (map (uncurry layerName) (pres gr i))
+& LP.top <>~ S.singleton (layerName lp i)
NN/Backend/Torch/Torch.hs: 22 changes (11 additions, 11 deletions)
@@ -49,34 +49,34 @@ torchModules lp = go (layerTy lp)
-- Ugly case analysis, sorry.
go Pool = [nn ty' [kW, kH, dW, dH]]
where
-kW = poolP PP._kernel_size
+kW = poolP PP.kernel_size
kH = kW
-dW = poolP PP._stride
+dW = poolP PP.stride
dH = dW
-ty' = case poolP PP._pool of
+ty' = case poolP PP.pool of
Just MAX -> "SpatialMaxPooling"
Just AVE -> "SpatialAveragePooling"
_ -> error "Unsupported Pooling Type"
-poolP f = lp ^?! LP._pooling_param._Just ^?! f
+poolP f = lp ^?! LP.pooling_param._Just ^?! f
go Conv = [nn (convolutionImpl kW) [nInputPlane, nOutputPlane, kW, kH, dW, dH, padding]]
where
-kW = convP CP._kernel_size
+kW = convP CP.kernel_size
kH = kW
-dW = convP CP._stride
+dW = convP CP.stride
dH = dW
-padding = convP CP._pad
+padding = convP CP.pad
-- TODO - propagation pass to size the layers
nInputPlane = Nothing
-nOutputPlane = convP CP._num_output
-convP f = lp ^?! LP._convolution_param._Just ^?! f
+nOutputPlane = convP CP.num_output
+convP f = lp ^?! LP.convolution_param._Just ^?! f
go ReLU = [nn' "Threshold"]
go IP = [nn "Linear" [nInput, nOutput]]
where
-- TODO - propagation pass to size the layers
nInput = Nothing
-nOutput = lp ^?! LP._inner_product_param._Just ^?! IP._num_output
+nOutput = lp ^?! LP.inner_product_param._Just ^?! IP.num_output
go Dropout = [nn "Dropout" [ratio]] where
-Just ratio = lp ^?! LP._dropout_param._Just ^?! DP._dropout_ratio
+Just ratio = lp ^?! LP.dropout_param._Just ^?! DP.dropout_ratio
go SoftmaxWithLoss = [nn' "LogSoftMax", criterion "ClassNLLCriterion"]
go Concat = [] -- Handled by flattening implementation
go ty' = error $ "Unhandled layer type: " ++ show ty'
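
A note on the lookup pattern above: (^?!) is lens's unsafe extraction operator (first target of a fold, or a runtime error) and _Just steps inside the optional submessage, so these lookups crash on a layer whose param block is unset. A toy, self-contained illustration with hypothetical types, not the generated ones:

{-# LANGUAGE RankNTypes, TemplateHaskell #-}
import Control.Lens

newtype PoolingParameter = PoolingParameter { _kernelSize :: Maybe Int }
newtype Layer = Layer { _poolingParam :: Maybe PoolingParameter }
makeLenses ''PoolingParameter
makeLenses ''Layer

-- Mirrors `poolP f = lp ^?! LP.pooling_param._Just ^?! f` above;
-- errors at runtime if the layer has no pooling_param set.
poolP :: Layer -> Lens' PoolingParameter (Maybe Int) -> Maybe Int
poolP lp f = lp ^?! poolingParam . _Just ^?! f

main :: IO ()
main = print $ poolP (Layer (Just (PoolingParameter (Just 3)))) kernelSize
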
NN/DSL.hs: 92 changes (46 additions, 46 deletions)
@@ -71,74 +71,74 @@ s = P.fromString
def :: Default a => a
def = P.defaultValue

-ty type'' = LP._type' ?~ s (asCaffe type'')
+ty type'' = LP.type' ?~ s (asCaffe type'')

layerTy :: LayerParameter -> LayerTy
-layerTy l = fromJust (LP.type' l) & toString & toCaffe & fromJust
+layerTy l = fromJust (LP._type' l) & toString & toCaffe & fromJust

-phase' phase'' = LP._include <>~ singleton (def & _phase ?~ phase'')
+phase' phase'' = LP.include <>~ singleton (def & phase ?~ phase'')

-param' v = _param .~ fromList v
+param' v = param .~ fromList v

-- Data
setF outer f n = set (outer . _Just . f) (Just n)
-source' source'' = setF _data_param DP._source (s source'')
-cropSize' = setF _transform_param TP._crop_size
-meanFile' meanFile'' = setF _transform_param TP._mean_file (s meanFile'')
-mirror' = setF _transform_param TP._mirror
-batchSize' = setF _data_param DP._batch_size
-backend' = setF _data_param DP._backend
+source' source'' = setF data_param DP.source (s source'')
+cropSize' = setF transform_param TP.crop_size
+meanFile' meanFile'' = setF transform_param TP.mean_file (s meanFile'')
+mirror' = setF transform_param TP.mirror
+batchSize' = setF data_param DP.batch_size
+backend' = setF data_param DP.backend

-- Convolution
-setConv = setF _convolution_param
-numOutputC' = setConv CP._num_output
-numInputC' = setConv CP._num_input
-kernelSizeC' = setConv CP._kernel_size
-padC' = setConv CP._pad
-groupC' = setConv CP._group
-strideC' = setConv CP._stride
-biasFillerC' = setConv CP._bias_filler
-weightFillerC' = setConv CP._weight_filler
+setConv = setF convolution_param
+numOutputC' = setConv CP.num_output
+numInputC' = setConv CP.num_input
+kernelSizeC' = setConv CP.kernel_size
+padC' = setConv CP.pad
+groupC' = setConv CP.group
+strideC' = setConv CP.stride
+biasFillerC' = setConv CP.bias_filler
+weightFillerC' = setConv CP.weight_filler

-- Pooling
-setPool = setF _pooling_param
-pool' = setPool PP._pool
-sizeP' = setPool PP._kernel_size
-strideP' = setPool PP._stride
-padP' = setPool PP._pad
+setPool = setF pooling_param
+pool' = setPool PP.pool
+sizeP' = setPool PP.kernel_size
+strideP' = setPool PP.stride
+padP' = setPool PP.pad

-- Inner Product
-setIP = setF _inner_product_param
-weightFillerIP' = setIP IP._weight_filler
-numInputIP' = setIP IP._num_input
-numOutputIP' = setIP IP._num_output
-biasFillerIP' = setIP IP._bias_filler
+setIP = setF inner_product_param
+weightFillerIP' = setIP IP.weight_filler
+numInputIP' = setIP IP.num_input
+numOutputIP' = setIP IP.num_output
+biasFillerIP' = setIP IP.bias_filler

-- LRN
-setLRN = setF _lrn_param
-localSize' = setLRN LRN._local_size
-alphaLRN' = setLRN LRN._alpha
-betaLRN' = setLRN LRN._beta
+setLRN = setF lrn_param
+localSize' = setLRN LRN.local_size
+alphaLRN' = setLRN LRN.alpha
+betaLRN' = setLRN LRN.beta

-- Fillers
-constant value' = def & FP._type' ?~ s "constant" & _value ?~ value'
-gaussian std' = def & FP._type' ?~ s "gaussian" & _std ?~ std'
-xavier std' = def & FP._type' ?~ s "xavier" & _std ?~ std'
+constant value' = def & FP.type' ?~ s "constant" & value ?~ value'
+gaussian std' = def & FP.type' ?~ s "gaussian" & std ?~ std'
+xavier std' = def & FP.type' ?~ s "xavier" & std ?~ std'
zero = constant 0.0

-- Multiplier
-lrMult' value' = _lr_mult ?~ value'
-decayMult' value' = _decay_mult ?~ value'
+lrMult' value' = lr_mult ?~ value'
+decayMult' value' = decay_mult ?~ value'

-- Simple Layers
-accuracy k' = def & ty Accuracy & phase' TEST & _accuracy_param ?~ (def & AP._top_k ?~ k')
+accuracy k' = def & ty Accuracy & phase' TEST & accuracy_param ?~ (def & AP.top_k ?~ k')
softmax = def & ty SoftmaxWithLoss
-dropout ratio = def & ty Dropout & _dropout_param ?~ (def & _dropout_ratio ?~ ratio)
+dropout ratio = def & ty Dropout & dropout_param ?~ (def & dropout_ratio ?~ ratio)
relu = def & ty ReLU
-conv = def & ty Conv & _convolution_param ?~ def
-ip n = def & ty IP & _inner_product_param ?~ def & numOutputIP' n
-data' = def & ty Data & _transform_param ?~ def & _data_param ?~ def
-maxPool = def & ty Pool & _pooling_param ?~ def & pool' MAX
-avgPool = def & ty Pool & _pooling_param ?~ def & pool' AVE
-lrn = def & ty LRN & _lrn_param ?~ def
+conv = def & ty Conv & convolution_param ?~ def
+ip n = def & ty IP & inner_product_param ?~ def & numOutputIP' n
+data' = def & ty Data & transform_param ?~ def & data_param ?~ def
+maxPool = def & ty Pool & pooling_param ?~ def & pool' MAX
+avgPool = def & ty Pool & pooling_param ?~ def & pool' AVE
+lrn = def & ty LRN & lrn_param ?~ def
concat' = def & ty Concat
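
Call sites are untouched by the rename: a layer is still assembled by threading & through these setters. A usage sketch with made-up values, assuming NN.DSL exports the combinators above (compare NN/Examples/GoogLeNet.hs below):

import Control.Lens ((&))
import NN.DSL

-- Illustrative only: a 64-output 3x3 convolution with 1px padding.
myConv = conv & numOutputC' 64
              & kernelSizeC' 3
              & padC' 1
              & weightFillerC' (xavier 0.1)
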
NN/Examples/Demo.hs: 2 changes (1 addition, 1 deletion)
@@ -21,7 +21,7 @@ import NN.Examples.GoogLeNet
caffe :: IO ()
caffe = do
let output = parse googLeNet & Caffe.middleEnd & Caffe.backend
-let names = output ^. NP._layer ^.. traverse . LP._name ^.. traverse . _Just
+let names = output ^. NP.layer ^.. traverse . LP.name ^.. traverse . _Just
print names

torch :: IO ()
NN/Examples/GoogLeNet.hs: 6 changes (3 additions, 3 deletions)
@@ -31,7 +31,7 @@ conv2 = googleConv & numOutputC' 192 & padC' 1 & kernelSizeC' 3 & weightFillerC'
topPool = avgPool & sizeP' 7 & strideP' 1
topFc = googleIP 1000 & biasFillerIP' (constant 0) & weightFillerIP' (xavier 0.0)
-- Weird, but in Caffe replication
-& _inner_product_param._Just.IP._weight_filler._Just._std .~ Nothing
+& inner_product_param._Just.IP.weight_filler._Just.std .~ Nothing

data Inception = Inception {_1x1, _3x3reduce, _3x3, _5x5reduce, _5x5, _poolProj :: Word32}

@@ -48,14 +48,14 @@ inception input Inception{..} = do
[googleConv & numOutputC' _1x1 & kernelSizeC' 1 & weightFillerC' (xavier 0.03), relu],
[googleConv & numOutputC' _3x3reduce & kernelSizeC' 1 & weightFillerC' (xavier 0.09), relu, googleConv & numOutputC' _3x3 & kernelSizeC' 3 & weightFillerC' (xavier 0.03) & padC' 1, relu],
[googleConv & numOutputC' _5x5reduce & kernelSizeC' 1 & weightFillerC' (xavier 0.2), relu, googleConv & numOutputC' _5x5 & kernelSizeC' 5 & weightFillerC' (xavier 0.03) & padC' 2, relu],
-[maxPool& sizeP' 3 & strideP' 3 & padP' 1, googleConv & numOutputC' _poolProj & kernelSizeC' 1 & weightFillerC' (xavier 0.1), relu]]
+[maxPool & sizeP' 3 & strideP' 3 & padP' 1, googleConv & numOutputC' _poolProj & kernelSizeC' 1 & weightFillerC' (xavier 0.1), relu]]

intermediateClassifier :: Node -> NetBuilder ()
intermediateClassifier source = do
(input, representation) <- sequential [pool1, conv1', relu, fc1, relu, dropout 0.7, fc2]
source >-> input

-forM_ [accuracy 1, accuracy 5, softmax & _loss_weight <>~ singleton 0.3] $ attach (From representation)
+forM_ [accuracy 1, accuracy 5, softmax & loss_weight <>~ singleton 0.3] $ attach (From representation)
where
pool1 = avgPool & sizeP' 5 & strideP' 3
conv1' = googleConv & numOutputC' 128 & kernelSizeC' 1 & weightFillerC' (xavier 0.08)
NN/Visualize.hs: 20 changes (10 additions, 10 deletions)
@@ -77,24 +77,24 @@ dimensions lp = params ty'
params Conv = L.unlines ["Conv",
format (int % "x" % int % "x" % int % " s" % int) o sz sz st]
where
-cp f = lp ^?! _convolution_param._Just ^?! f._Just
-sz = cp CP._kernel_size
-st = cp CP._stride
-o = cp CP._num_output
+cp f = lp ^?! convolution_param._Just ^?! f._Just
+sz = cp CP.kernel_size
+st = cp CP.stride
+o = cp CP.num_output
params IP = L.unlines ["FC", format int ip']
where
-ip' = lp ^?! _inner_product_param._Just ^?! IP._num_output._Just
+ip' = lp ^?! inner_product_param._Just ^?! IP.num_output._Just
-- 3x3 s2
params Pool = L.unlines [pty', format (int % "x" % int % " s" % int) sz sz st]
where
-pp f = lp ^?! _pooling_param._Just ^?! f._Just
-pty' = case pp _pool of
+pp f = lp ^?! pooling_param._Just ^?! f._Just
+pty' = case pp pool of
MAX -> "MaxPool"
AVE -> "AveragePool"
STOCHASTIC -> "StochasticPool"
-sz = pp PP._kernel_size
-st = pp PP._stride
+sz = pp PP.kernel_size
+st = pp PP.stride
params Accuracy = format ("Top-" % int) k
where
-k = lp ^?! _accuracy_param._Just ^?! _top_k._Just
+k = lp ^?! accuracy_param._Just ^?! top_k._Just
params _ = (L.pack . asCaffe) ty'
add_lenses.py: 65 changes (0 additions, 65 deletions)

This file was deleted.

lens_proto.sh: 9 changes (1 addition, 8 deletions)
@@ -3,14 +3,7 @@
set -e

rm -r Gen/ || true
-hprotoc --prefix Gen caffe.proto
-(
-cd Gen
-for f in $(find . -iname "*.hs"); do
-echo $f
-../add_lenses.py $f || true
-done
-)
+hprotoc --prefix Gen --lenses caffe.proto

protoc caffe.proto --python_out=Gen/
touch Gen/__init__.py
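
With hprotoc emitting the lenses itself, the post-processing loop over the generated modules (and add_lenses.py with it) disappears, and the generated modules come lens-ready; consumers keep their qualified imports. A hypothetical import header, since the exact generated module paths are not shown in this diff and are only assumed from the --prefix Gen flag:

-- Hypothetical module paths; hprotoc derives them from caffe.proto.
import qualified Gen.Caffe.LayerParameter       as LP
import qualified Gen.Caffe.ConvolutionParameter as CP
import qualified Gen.Caffe.PoolingParameter     as PP
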
