Cleanup ncf/pytorch.py
vatai committed Jul 7, 2020
1 parent 4d93a71 commit 26791d4
Showing 1 changed file with 41 additions and 25 deletions.
benchmarker/modules/problems/ncf/pytorch.py: 41 additions & 25 deletions
@@ -1,17 +1,19 @@
+# This class is from https://github.com/mlperf.
+import argparse
+
 import numpy as np
 import torch
 import torch.nn as nn
-import argparse

-#This class is from https://github.com/mlperf.

 class NeuMF(nn.Module):
-    def __init__(self, nb_users, nb_items,
-                 mf_dim, mf_reg,
-                 mlp_layer_sizes, mlp_layer_regs):
+    def __init__(
+        self, nb_users, nb_items, mf_dim, mf_reg, mlp_layer_sizes, mlp_layer_regs
+    ):
         if len(mlp_layer_sizes) != len(mlp_layer_regs):
-            raise RuntimeError('u dummy, layer_sizes != layer_regs!')
+            raise RuntimeError("u dummy, layer_sizes != layer_regs!")
         if mlp_layer_sizes[0] % 2 != 0:
-            raise RuntimeError('u dummy, mlp_layer_sizes[0] % 2 != 0')
+            raise RuntimeError("u dummy, mlp_layer_sizes[0] % 2 != 0")
         super(NeuMF, self).__init__()
         nb_mlp_layers = len(mlp_layer_sizes)

@@ -22,24 +24,27 @@ def __init__(self, nb_users, nb_items,

         self.mlp = nn.ModuleList()
         for i in range(1, nb_mlp_layers):
-            self.mlp.extend([nn.Linear(mlp_layer_sizes[i - 1], mlp_layer_sizes[i])])  # noqa: E501
+            self.mlp.extend(
+                [nn.Linear(mlp_layer_sizes[i - 1], mlp_layer_sizes[i])]
+            )  # noqa: E501

         self.final = nn.Linear(mlp_layer_sizes[-1] + mf_dim, 1)

-        self.mf_user_embed.weight.data.normal_(0., 0.01)
-        self.mf_item_embed.weight.data.normal_(0., 0.01)
-        self.mlp_user_embed.weight.data.normal_(0., 0.01)
-        self.mlp_item_embed.weight.data.normal_(0., 0.01)
+        self.mf_user_embed.weight.data.normal_(0.0, 0.01)
+        self.mf_item_embed.weight.data.normal_(0.0, 0.01)
+        self.mlp_user_embed.weight.data.normal_(0.0, 0.01)
+        self.mlp_item_embed.weight.data.normal_(0.0, 0.01)

         def golorot_uniform(layer):
             fan_in, fan_out = layer.in_features, layer.out_features
-            limit = np.sqrt(6. / (fan_in + fan_out))
+            limit = np.sqrt(6.0 / (fan_in + fan_out))
             layer.weight.data.uniform_(-limit, limit)

         def lecunn_uniform(layer):
             fan_in, fan_out = layer.in_features, layer.out_features  # noqa: F841, E501
-            limit = np.sqrt(3. / fan_in)
+            limit = np.sqrt(3.0 / fan_in)
             layer.weight.data.uniform_(-limit, limit)

         for layer in self.mlp:
             if type(layer) != nn.Linear:
                 continue

@@ -66,21 +71,32 @@ def forward(self, data, sigmoid=False):
             x = torch.sigmoid(x)
         return x


 def get_kernel(params, unparsed_args=None):
     assert params["mode"] == "inference"
-    parser = argparse.ArgumentParser(description='Benchmark mlperf recommendation model')
-    parser.add_argument('--factors', type=int, default=8,
-                        help='number of predictive factors')
-    parser.add_argument('--layers', nargs='+', type=int,default=[64, 32, 16, 8],
-                        help='size of hidden layers for MLP')
+    parser = argparse.ArgumentParser(
+        description="Benchmark mlperf recommendation model"
+    )
+    parser.add_argument("--nb_users", type=int, default=128, help="number of users")
+    parser.add_argument("--nb_items", type=int, default=128, help="number of items")
+    parser.add_argument(
+        "--factors", type=int, default=8, help="number of predictive factors"
+    )
+    parser.add_argument(
+        "--layers",
+        nargs="+",
+        type=int,
+        default=[64, 32, 16, 8],
+        help="size of hidden layers for MLP",
+    )
     args = parser.parse_args(unparsed_args)
     params["problem"].update(vars(args))
-    Net = NeuMF(nb_users=params["problem"]["size"][1],
-                nb_items=params["problem"]["size"][2],
-                mf_dim=args.factors, mf_reg=0.,
-                mlp_layer_sizes=args.layers,
-                mlp_layer_regs=[0. for i in args.layers])
+    Net = NeuMF(
+        nb_users=args.nb_users,
+        nb_items=args.nb_items,
+        mf_dim=args.factors,
+        mf_reg=0.0,
+        mlp_layer_sizes=args.layers,
+        mlp_layer_regs=[0.0 for i in args.layers],
+    )
     return Net
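
For context, a minimal sketch of how the refactored get_kernel could be exercised after this commit. The import path simply mirrors the file path above, and the params dictionary used here (a "mode" key plus an empty "problem" dict) is an assumption inferred from the keys the function touches in the diff, not the benchmarker framework's real configuration object.

    # Hypothetical usage sketch; the params layout and import path are assumptions.
    from benchmarker.modules.problems.ncf.pytorch import get_kernel

    params = {"mode": "inference", "problem": {}}  # assumed minimal structure
    cli = ["--nb_users", "256", "--nb_items", "512", "--factors", "8"]

    net = get_kernel(params, cli)  # parses the CLI flags and builds a NeuMF instance
    print(net)                     # prints the module tree (embeddings, MLP, final Linear)
    print(sum(p.numel() for p in net.parameters()), "parameters")

The new --nb_users/--nb_items flags replace the old reliance on params["problem"]["size"], so the model size is now controlled entirely from the command line.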
