This repository has been archived by the owner. It is now read-only.
Merge pull request #10 from radibnia77/main
Update lookalike application
xun-hu-at-futurewei-com committed Jul 21, 2021
2 parents 41c3168 + f991fc8 commit 3950254a7970019340e9cca50c262714d0e1745c
Showing 16 changed files with 1,157 additions and 758 deletions.

This file was deleted.

This file was deleted.

This file was deleted.

@@ -42,10 +42,13 @@ def x(l1):
return _udf_distance

def run(hive_context, cfg):
# load dataframes
lookalike_loaded_table_norm = cfg['output']['gucdocs_loaded_table_norm']

# input tables
keywords_table = cfg["input"]["keywords_table"]
seeduser_table = cfg["input"]["seeduser_table"]
lookalike_loaded_table_norm = cfg['output']['gucdocs_loaded_table_norm']

# output dataframes
lookalike_score_table = cfg["output"]["score_table"]

command = "SELECT * FROM {}"
@@ -21,6 +21,21 @@
import argparse
from pyspark.sql.functions import udf
import time
import math

'''
spark-submit --executor-memory 16G --driver-memory 24G --num-executors 16 --executor-cores 5 --master yarn --conf spark.driver.maxResultSize=8g distance_table_list.py config.yml
'''


def euclidean(l1):
    def _euclidean(l2):
        # similarity = 1 - (euclidean distance / sqrt(dimension)), so identical vectors score 1.0
        similarities = []
        for item in l1:
            similarity = 1 - (math.sqrt(sum([(item[i] - l2[i]) ** 2 for i in range(len(item))])) / math.sqrt(len(item)))
            similarities.append(similarity)
        return similarities
    return _euclidean
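For intuition, here is a minimal standalone sketch of the same normalized-Euclidean similarity on plain Python lists. The sample vectors are hypothetical keyword-score vectors, and the scaling by sqrt(dimension) assumes every score lies in [0, 1].

import math

def euclidean_similarity(seed_vec, candidate_vec):
    # 1 - (euclidean distance / largest possible distance for unit-range features)
    dist = math.sqrt(sum((a - b) ** 2 for a, b in zip(seed_vec, candidate_vec)))
    return 1 - dist / math.sqrt(len(seed_vec))

seed_vec = [0.2, 0.9, 0.1]       # hypothetical seed-user keyword scores
candidate_vec = [0.3, 0.7, 0.0]  # hypothetical candidate-user keyword scores
print(round(euclidean_similarity(seed_vec, candidate_vec), 3))  # 0.859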


def dot(l1):
@@ -33,9 +48,11 @@ def _dot(l2):
return _dot



def ux(l1):
_udf_similarity = udf(dot(l1), ArrayType(FloatType()) )
if alg == "euclidean":
_udf_similarity = udf(euclidean(l1), ArrayType(FloatType()))
if alg == "dot":
_udf_similarity = udf(dot(l1), ArrayType(FloatType()))
return _udf_similarity


@@ -65,8 +82,9 @@ def _mean(l):
udf_mean = udf(_mean, FloatType())

def run(hive_context, cfg):
# load dataframes
lookalike_score_table_norm = cfg['output']['did_score_table_norm']

## load dataframes
lookalike_score_table_norm = cfg['output']['score_norm_table']
keywords_table = cfg["input"]["keywords_table"]
seeduser_table = cfg["input"]["seeduser_table"]
lookalike_similarity_table = cfg["output"]["similarity_table"]
@@ -78,7 +96,10 @@ def run(hive_context, cfg):


#### creating a tuple of did and kws for seed users
df = df.withColumn('kws_norm_list', udf_tolist(col('kws_norm')))
if alg == "dot":
df = df.withColumn('kws_norm_list', udf_tolist(col('kws_norm')))
if alg == "euclidean":
df = df.withColumn('kws_norm_list', udf_tolist(col('kws')))
df_seed_user = df_seed_user.join(df.select('did','kws_norm_list'), on=['did'], how='left')
seed_user_list = df_seed_user.select('did', 'kws_norm_list').collect()

@@ -115,6 +136,8 @@ def run(hive_context, cfg):
sc.setLogLevel('WARN')
hive_context = HiveContext(sc)

## select similarity algorithm
alg = cfg["input"]["alg"]
run(hive_context=hive_context, cfg=cfg)
sc.stop()
end = time.time()
@@ -0,0 +1,31 @@
score_generator:
  input:
    log_table: "lookalike_03042021_logs"
    did_table: "lookalike_03042021_trainready"
    keywords_table: "din_ad_keywords_09172020"
    test_table: "lookalike_trainready_jimmy_test"
    din_model_tf_serving_url: "http://10.193.217.105:8506/v1/models/lookalike3:predict"
    din_model_length: 20
    seeduser_table: "lookalike_seeduser"
    number_of_seeduser: 1000
    extend: 2000
    alg: "euclidean"  # currently only "euclidean" and "dot" are supported
  output:
    did_score_table: "lookalike_score_01112021"
    score_norm_table: "lookalike_score_norm_01112021"

score_vector:
  keywords_table: "din_ad_keywords_09172020"
  score_norm_table: "lookalike_score_norm_01112021"
  score_vector_table: "lookalike_score_vector_01112021"
  did_bucket_size: 2
  did_bucket_step: 2
score_vector_rebucketing:
  did_bucket_size: 2
  did_bucket_step: 2
  alpha_did_bucket_size: 1000
  score_vector_alpha_table: 'lookalike_score_vector_alpha_01112021'
top_n_similarity:
  alpha_did_bucket_step: 100
  top_n: 100
  similarity_table: "lookalike_similarity_01112021"
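As a quick illustration of how the pipeline scripts consume this file, here is a hedged sketch of loading it with yaml.safe_load and reading a few of the keys above. The 'config.yml' path and the chosen keys are illustrative assumptions; each spark-submit call passes the config path as its argument.

import yaml

with open('config.yml', 'r') as yml_file:
    cfg = yaml.safe_load(yml_file)

alg = cfg['score_generator']['input']['alg']                             # "euclidean" or "dot"
score_norm_table = cfg['score_generator']['output']['score_norm_table']  # output Hive table name
alpha_buckets = cfg['score_vector_rebucketing']['alpha_did_bucket_size'] # number of secondary buckets
print(alg, score_norm_table, alpha_buckets)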
@@ -0,0 +1,14 @@
#!/bin/bash

spark-submit --executor-memory 16G --driver-memory 24G --num-executors 16 --executor-cores 5 --master yarn --conf spark.driver.maxResultSize=8g seed_user_selector.py config.yml "29"

spark-submit --executor-memory 16G --driver-memory 24G --num-executors 16 --executor-cores 5 --master yarn --conf spark.driver.maxResultSize=8g score_generator.py config.yml

spark-submit --master yarn --num-executors 20 --executor-cores 5 --executor-memory 8G --driver-memory 8G --conf spark.driver.maxResultSize=5g --conf spark.hadoop.hive.exec.dynamic.partition=true --conf spark.hadoop.hive.exec.dynamic.partition.mode=nonstrict score_vector_table.py config.yml

spark-submit --master yarn --num-executors 20 --executor-cores 5 --executor-memory 8G --driver-memory 8G --conf spark.driver.maxResultSize=5g --conf spark.hadoop.hive.exec.dynamic.partition=true --conf spark.hadoop.hive.exec.dynamic.partition.mode=nonstrict score_vector_rebucketing.py config.yml

spark-submit --master yarn --num-executors 20 --executor-cores 5 --executor-memory 16G --driver-memory 16G --conf spark.driver.maxResultSize=5g --conf spark.hadoop.hive.exec.dynamic.partition=true --conf spark.hadoop.hive.exec.dynamic.partition.mode=nonstrict top_n_similarity_table_generator.py config.yml

spark-submit --executor-memory 16G --driver-memory 24G --num-executors 16 --executor-cores 5 --master yarn --conf spark.driver.maxResultSize=8g validation.py config.yml "29"

@@ -26,6 +26,15 @@
import argparse
from math import sqrt

'''
This process generates the score-norm-table with the following format.
DataFrame[age: int, gender: int, did: string, did_index: bigint,
interval_starting_time: array<string>, interval_keywords: array<string>,
kwi: array<string>, kwi_show_counts: array<string>, kwi_click_counts: array<string>,
did_bucket: string, kws: map<string,float>, kws_norm: map<string,float>]
'''
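For reference, the same schema written out as an explicit PySpark StructType. This is a readability sketch only; the job itself infers the schema rather than declaring it.

from pyspark.sql.types import (StructType, StructField, IntegerType, LongType,
                               StringType, ArrayType, MapType, FloatType)

score_norm_schema = StructType([
    StructField('age', IntegerType()),
    StructField('gender', IntegerType()),
    StructField('did', StringType()),
    StructField('did_index', LongType()),
    StructField('interval_starting_time', ArrayType(StringType())),
    StructField('interval_keywords', ArrayType(StringType())),
    StructField('kwi', ArrayType(StringType())),
    StructField('kwi_show_counts', ArrayType(StringType())),
    StructField('kwi_click_counts', ArrayType(StringType())),
    StructField('did_bucket', StringType()),
    StructField('kws', MapType(StringType(), FloatType())),
    StructField('kws_norm', MapType(StringType(), FloatType())),
])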


def flatten(lst):
@@ -69,12 +78,11 @@ def predict(serving_url, record, length, new_keyword):
return predictions



def gen_mappings_media(hive_context, cfg):
# this function generates mappings between the media category and the slots.
media_category_list = cfg["mapping"]["new_slot_id_media_category_list"]
media_category_list = cfg['score_generator']["mapping"]["new_slot_id_media_category_list"]
media_category_set = set(media_category_list)
slot_id_list = cfg["mapping"]["new_slot_id_list"]
slot_id_list = cfg['score_generator']["mapping"]["new_slot_id_list"]
# 1 vs 1: slot_id : media_category
media_slot_mapping = dict()
for media_category in media_category_set:
@@ -90,6 +98,7 @@ def gen_mappings_media(hive_context, cfg):
df = hive_context.createDataFrame(media_slot_mapping_rows, schema)
return df


def normalize(x):
c = 0
for key, value in x.items():
@@ -100,6 +109,7 @@ def normalize(x):
result[keyword] = value / C
return result


udf_normalize = udf(normalize, MapType(StringType(), FloatType()))
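Only the first and last lines of normalize are visible in this hunk. Below is a hedged sketch of the presumed full helper, assuming an L2 (unit-length) normalization, which the lower-case c / upper-case C pair and the "from math import sqrt" at the top of the file suggest; the hidden lines are not part of the diff, so treat this as an approximation.

from math import sqrt

def normalize(x):
    # assumed body: accumulate squared scores, then divide each score by the L2 norm
    c = 0.0
    for key, value in x.items():
        c += value * value
    C = sqrt(c) if c > 0 else 1.0  # guard against an all-zero map (assumption)
    result = {}
    for keyword, value in x.items():
        result[keyword] = value / C
    return result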


@@ -112,8 +122,6 @@ def __init__(self, df_did, df_keywords, din_model_tf_serving_url, din_model_leng
self.df_did_loaded = None
self.keyword_index_list, self.keyword_list = self.get_keywords()



def get_keywords(self):
keyword_index_list, keyword_list = list(), list()
for dfk in self.df_keywords.collect():
@@ -143,22 +151,16 @@ def __helper(did, kwi_show_counts, age, gender):

return did_kw_scores


return __helper

self.df_did_loaded = self.df_did.withColumn('kws',
udf(predict_udf(din_model_length=self.din_model_length,
din_model_tf_serving_url=self.din_model_tf_serving_url,
keyword_index_list=self.keyword_index_list,
keyword_list=self.keyword_list),
MapType(StringType(), FloatType()))
(col('did_index'), col('kwi_show_counts'),
col('age'), col('gender')))


if __name__ == "__main__":
@@ -168,30 +170,32 @@ def __helper(did, kwi_show_counts, age, gender):
with open(args.config_file, 'r') as yml_file:
cfg = yaml.safe_load(yml_file)


sc = SparkContext.getOrCreate()
sc.setLogLevel('WARN')
hive_context = HiveContext(sc)

# load dataframes
did_table, keywords_table, din_tf_serving_url, length = cfg["input"]["did_table"], cfg["input"]["keywords_table"],cfg["input"]["din_model_tf_serving_url"],cfg["input"]["din_model_length"]
did_table, keywords_table, din_tf_serving_url, length = (cfg['score_generator']["input"]["did_table"],
                                                          cfg['score_generator']["input"]["keywords_table"],
                                                          cfg['score_generator']["input"]["din_model_tf_serving_url"],
                                                          cfg['score_generator']["input"]["din_model_length"])

command = "SELECT * FROM {}"
df_did = hive_context.sql(command.format(did_table))
df_keywords = hive_context.sql(command.format(keywords_table))
###### temporary adding to filter based on active keywords
df_keywords = df_keywords.filter( (df_keywords.keyword =="video") | (df_keywords.keyword =="shopping") | (df_keywords.keyword == "info") |
(df_keywords.keyword =="social") | (df_keywords.keyword =="reading") | (df_keywords.keyword =="travel") |
(df_keywords.keyword =="entertainment") )
did_loaded_table = cfg['output']['did_score_table']
did_score_table_norm = cfg['output']['did_score_table_norm']
# temporary adding to filter based on active keywords
df_keywords = df_keywords.filter((df_keywords.keyword == "video") | (df_keywords.keyword == "shopping") | (df_keywords.keyword == "info") |
(df_keywords.keyword == "social") | (df_keywords.keyword == "reading") | (df_keywords.keyword == "travel") |
(df_keywords.keyword == "entertainment"))
did_loaded_table = cfg['score_generator']['output']['did_score_table']
score_norm_table = cfg['score_generator']['output']['score_norm_table']

# create a CTR score generator instance and run to get the loaded did
ctr_score_generator = CTRScoreGenerator(df_did, df_keywords, din_tf_serving_url, length)
ctr_score_generator.run()
df_did_loaded = ctr_score_generator.df_did_loaded
df_did_loaded_norm = df_did_loaded.withColumn('kws_norm', udf_normalize(col('kws')))

# save the loaded did to hive table
df_did_loaded_norm.write.option("header", "true").option(
"encoding", "UTF-8").mode("overwrite").format('hive').saveAsTable(did_score_table_norm)
"encoding", "UTF-8").mode("overwrite").format('hive').saveAsTable(score_norm_table)
@@ -0,0 +1,103 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0.html

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import yaml
import argparse
from pyspark import SparkContext
from pyspark.sql import HiveContext
from pyspark.sql.functions import lit, col, udf
from pyspark.sql.types import FloatType, StringType, StructType, StructField, ArrayType, MapType, IntegerType
# from rest_client import predict, str_to_intlist
import requests
import json
import argparse
from pyspark.sql.functions import udf
from math import sqrt
import time
import hashlib

'''
To run, execute the following in the application folder.
spark-submit --master yarn --num-executors 20 --executor-cores 5 --executor-memory 8G --driver-memory 8G --conf spark.driver.maxResultSize=5g --conf spark.hadoop.hive.exec.dynamic.partition=true --conf spark.hadoop.hive.exec.dynamic.partition.mode=nonstrict score_vector_rebucketing.py config.yml
This process adds a secondary bucket id (alpha_did_bucket) to each record.
'''


def __save_as_table(df, table_name, hive_context, create_table):

    if create_table:
        command = """
            DROP TABLE IF EXISTS {}
            """.format(table_name)

        hive_context.sql(command)

        df.createOrReplaceTempView("r907_temp_table")

        command = """
            CREATE TABLE IF NOT EXISTS {} as select * from r907_temp_table
            """.format(table_name)

        hive_context.sql(command)


def assign_new_bucket_id(df, n, new_column_name):
    def __hash_sha256(s):
        hex_value = hashlib.sha256(s.encode('utf-8')).hexdigest()
        return int(hex_value, 16)
    _udf = udf(lambda x: __hash_sha256(x) % n, IntegerType())
    df = df.withColumn(new_column_name, _udf(df.did))
    return df
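A quick usage sketch of assign_new_bucket_id on a toy DataFrame; the local SparkSession and the sample did values are purely illustrative assumptions, not part of the job.

from pyspark.sql import SparkSession

spark = SparkSession.builder.master('local[1]').appName('rebucket_demo').getOrCreate()
df_demo = spark.createDataFrame([('did_001',), ('did_002',), ('did_003',)], ['did'])

# hash each did with SHA-256 and map it into one of 1000 alpha buckets
df_demo = assign_new_bucket_id(df_demo, 1000, 'alpha_did_bucket')
df_demo.show()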


def run(hive_context, cfg):

    score_vector_table = cfg['score_vector']['score_vector_table']
    bucket_size = cfg['score_vector_rebucketing']['did_bucket_size']
    bucket_step = cfg['score_vector_rebucketing']['did_bucket_step']
    alpha_bucket_size = cfg['score_vector_rebucketing']['alpha_did_bucket_size']
    score_vector_alpha_table = cfg['score_vector_rebucketing']['score_vector_alpha_table']

    first_round = True
    for start_bucket in range(0, bucket_size, bucket_step):
command = "SELECT did, did_bucket, score_vector FROM {} WHERE did_bucket BETWEEN {} AND {}".format(score_vector_table, start_bucket, start_bucket+bucket_size-1)

        df = hive_context.sql(command)
        df = assign_new_bucket_id(df, alpha_bucket_size, 'alpha_did_bucket')
        df = df.select('did', 'did_bucket', 'score_vector', 'alpha_did_bucket')
        __save_as_table(df, table_name=score_vector_alpha_table, hive_context=hive_context, create_table=first_round)
        first_round = False


if __name__ == "__main__":
    start = time.time()
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('config_file')
    args = parser.parse_args()
    with open(args.config_file, 'r') as yml_file:
        cfg = yaml.safe_load(yml_file)

    sc = SparkContext.getOrCreate()
    sc.setLogLevel('WARN')
    hive_context = HiveContext(sc)

    run(hive_context=hive_context, cfg=cfg)
    sc.stop()
    end = time.time()
    print('Runtime of the program is:', (end - start))
