diff --git a/Params.py b/Params.py index e210f67..7c12a2a 100644 --- a/Params.py +++ b/Params.py @@ -5,256 +5,81 @@ def parse_args(): parser = argparse.ArgumentParser(description='Model Params') - -#---------Tmall-------------------------------------------------------------------------------------------------------------- - # #for this model - # parser.add_argument('--hidden_dim', default=16, type=int, help='embedding size') - # parser.add_argument('--gnn_layer', default="[16,16,16]", type=str, help='gnn layers: number + dim') - # parser.add_argument('--dataset', default='Tmall', type=str, help='name of dataset') - # parser.add_argument('--point', default='for_meta_hidden_dim', type=str, help='') - # parser.add_argument('--title', default='dim__8', type=str, help='title of model') - # parser.add_argument('--sampNum', default=40, type=int, help='batch size for sampling') - - # #for train - # parser.add_argument('--lr', default=3e-4, type=float, help='learning rate') - # parser.add_argument('--opt_base_lr', default=1e-3, type=float, help='learning rate') - # parser.add_argument('--opt_max_lr', default=5e-3, type=float, help='learning rate') - # parser.add_argument('--opt_weight_decay', default=1e-4, type=float, help='weight decay regularizer') - # parser.add_argument('--meta_opt_base_lr', default=1e-4, type=float, help='learning rate') - # parser.add_argument('--meta_opt_max_lr', default=2e-3, type=float, help='learning rate') - # parser.add_argument('--meta_opt_weight_decay', default=1e-4, type=float, help='weight decay regularizer') - # parser.add_argument('--meta_lr', default=1e-3, type=float, help='_meta_learning rate') - - # parser.add_argument('--batch', default=8192, type=int, help='batch size') - # parser.add_argument('--meta_batch', default=128, type=int, help='batch size') - # parser.add_argument('--SSL_batch', default=18, type=int, help='batch size') - # parser.add_argument('--reg', default=1e-3, type=float, help='weight decay regularizer') - # parser.add_argument('--beta', default=0.005, type=float, help='scale of infoNCELoss') - # parser.add_argument('--epoch', default=300, type=int, help='number of epochs') - # # parser.add_argument('--decay', default=0.96, type=float, help='weight decay rate') - # parser.add_argument('--shoot', default=10, type=int, help='K of top k') - # parser.add_argument('--inner_product_mult', default=1, type=float, help='multiplier for the result') - # parser.add_argument('--drop_rate', default=0.8, type=float, help='drop_rate') - # parser.add_argument('--drop_rate1', default=0.5, type=float, help='drop_rate') - # parser.add_argument('--seed', type=int, default=6) - # parser.add_argument('--slope', type=float, default=0.1) - # parser.add_argument('--patience', type=int, default=100) - # #for save and read - # parser.add_argument('--path', default='/home/ww/Code/DATASET/work3_dataset/', type=str, help='data path') - # parser.add_argument('--save_path', default='tem', help='file name to save model and training record') - # parser.add_argument('--load_model', default=None, help='model name to load') - # parser.add_argument('--target', default='buy', type=str, help='target behavior to predict on') - # parser.add_argument('--isload', default=False , type=bool, help='whether load model') - # parser.add_argument('--isJustTest', default=False , type=bool, help='whether load model') - # parser.add_argument('--loadModelPath', 
default='/home/ww/Code/work3/BSTRec/Model/Tmall/for_meta_hidden_dim_dim__8_Tmall_2021_07_08__01_35_54_lr_0.0003_reg_0.001_batch_size_4096_gnn_layer_[16,16,16].pth', type=str, help='loadModelPath') - # #Tmall: # loadPath_SSL_meta = "/home/ww/Code/work3/BSTRec/Model/Tmall/for_meta_hidden_dim_dim__8_Tmall_2021_07_08__01_35_54_lr_0.0003_reg_0.001_batch_size_4096_gnn_layer_[16,16,16].pth" - # #IJCAI_15: # loadPath_SSL_meta = "/home/ww/Code/work3/BSTRec/Model/IJCAI_15/for_meta_hidden_dim_dim__8_IJCAI_15_2021_07_10__14_11_55_lr_0.0003_reg_0.001_batch_size_4096_gnn_layer_[16,16,16].pth" - # #retailrocket: # loadPath_SSL_meta = "/home/ww/Code/work3/BSTRec/Model/retailrocket/for_meta_hidden_dim_dim__8_retailrocket_2021_07_10__18_35_32_lr_0.0003_reg_0.01_batch_size_1024_gnn_layer_[16,16,16].pth" - - - # #use less - # # parser.add_argument('--memosize', default=2, type=int, help='memory size') - # parser.add_argument('--head_num', default=4, type=int, help='head_num_of_multihead_attention') - # parser.add_argument('--beta_multi_behavior', default=0.005, type=float, help='scale of infoNCELoss') - # parser.add_argument('--sampNum_slot', default=30, type=int, help='SSL_step') - # parser.add_argument('--SSL_slot', default=1, type=int, help='SSL_step') - # parser.add_argument('--k', default=2, type=float, help='MFB') - # parser.add_argument('--meta_time_rate', default=0.8, type=float, help='gating rate') - # parser.add_argument('--meta_behavior_rate', default=0.8, type=float, help='gating rate') - # parser.add_argument('--meta_slot', default=2, type=int, help='epoch number for each SSL') - # parser.add_argument('--time_slot', default=60*60*24*360, type=float, help='length of time slots') - # parser.add_argument('--hidden_dim_meta', default=16, type=int, help='embedding size') - # # parser.add_argument('--att_head', default=2, type=int, help='number of attention heads') - # # parser.add_argument('--gnn_layer', default=2, type=int, help='number of gnn layers') - # # parser.add_argument('--trnNum', default=10000, type=int, help='number of training instances per epoch') - # # parser.add_argument('--deep_layer', default=0, type=int, help='number of deep layers to make the final prediction') - # # parser.add_argument('--iiweight', default=0.3, type=float, help='weight for ii') - # # parser.add_argument('--graphSampleN', default=10000, type=int, help='use 25000 for training and 200000 for testing, empirically') - # # parser.add_argument('--divSize', default=1000, type=int, help='div size for smallTestEpoch') - # # parser.add_argument('--tstEpoch', default=1, type=int, help='number of epoch to test while training') - # # parser.add_argument('--subUsrSize', default=10, type=int, help='number of item for each sub-user') - # # parser.add_argument('--subUsrDcy', default=0.9, type=float, help='decay factor for sub-users over time') - # # parser.add_argument('--slot', default=0.5, type=float, help='length of time slots') -# ---------Tmall-------------------------------------------------------------------------------------------------------------- - - -# # # #---------IJCAI-------------------------------------------------------------------------------------------------------------- - # #for this model - # parser.add_argument('--hidden_dim', default=16, type=int, help='embedding size') - # parser.add_argument('--gnn_layer', default="[16,16,16]", type=str, help='gnn layers: number + dim') - # parser.add_argument('--dataset', default='IJCAI_15', type=str, help='name of dataset') - # parser.add_argument('--point', 
default='for_meta_hidden_dim', type=str, help='') - # parser.add_argument('--title', default='dim__8', type=str, help='title of model') - # parser.add_argument('--sampNum', default=10, type=int, help='batch size for sampling') - - # #for train - # parser.add_argument('--lr', default=3e-4, type=float, help='learning rate') - # parser.add_argument('--opt_base_lr', default=1e-3, type=float, help='learning rate') - # parser.add_argument('--opt_max_lr', default=2e-3, type=float, help='learning rate') - # parser.add_argument('--opt_weight_decay', default=1e-4, type=float, help='weight decay regularizer') - # parser.add_argument('--meta_opt_base_lr', default=1e-4, type=float, help='learning rate') - # parser.add_argument('--meta_opt_max_lr', default=1e-3, type=float, help='learning rate') - # parser.add_argument('--meta_opt_weight_decay', default=1e-4, type=float, help='weight decay regularizer') - # parser.add_argument('--meta_lr', default=1e-3, type=float, help='_meta_learning rate') - - # parser.add_argument('--batch', default=8192, type=int, help='batch size') - # parser.add_argument('--meta_batch', default=128, type=int, help='batch size') - # parser.add_argument('--SSL_batch', default=30, type=int, help='batch size') - # parser.add_argument('--reg', default=1e-3, type=float, help='weight decay regularizer') - # parser.add_argument('--beta', default=0.005, type=float, help='scale of infoNCELoss') - # parser.add_argument('--epoch', default=300, type=int, help='number of epochs') - # # parser.add_argument('--decay', default=0.96, type=float, help='weight decay rate') - # parser.add_argument('--shoot', default=10, type=int, help='K of top k') - # parser.add_argument('--inner_product_mult', default=1, type=float, help='multiplier for the result') - # parser.add_argument('--drop_rate', default=0.8, type=float, help='drop_rate') - # parser.add_argument('--drop_rate1', default=0.5, type=float, help='drop_rate') - # parser.add_argument('--seed', type=int, default=6) - # parser.add_argument('--slope', type=float, default=0.1) - # parser.add_argument('--patience', type=int, default=100) - # #for save and read - # parser.add_argument('--path', default='/home/ww/Code/DATASET/work3_dataset/', type=str, help='data path') - # parser.add_argument('--save_path', default='tem', help='file name to save model and training record') - # parser.add_argument('--load_model', default=None, help='model name to load') - # parser.add_argument('--target', default='buy', type=str, help='target behavior to predict on') - # parser.add_argument('--isload', default=False , type=bool, help='whether load model') - # parser.add_argument('--isJustTest', default=False , type=bool, help='whether load model') - # parser.add_argument('--loadModelPath', default='/home/ww/Code/work3/BSTRec/Model/IJCAI_15/for_meta_hidden_dim_dim__8_IJCAI_15_2021_07_10__14_11_55_lr_0.0003_reg_0.001_batch_size_4096_gnn_layer_[16,16,16].pth', type=str, help='loadModelPath') - # # #Tmall: # loadPath_SSL_meta = "/home/ww/Code/work3/BSTRec/Model/Tmall/for_meta_hidden_dim_dim__8_Tmall_2021_07_08__01_35_54_lr_0.0003_reg_0.001_batch_size_4096_gnn_layer_[16,16,16].pth" - # # #IJCAI_15: # loadPath_SSL_meta = "/home/ww/Code/work3/BSTRec/Model/IJCAI_15/for_meta_hidden_dim_dim__8_IJCAI_15_2021_07_10__14_11_55_lr_0.0003_reg_0.001_batch_size_4096_gnn_layer_[16,16,16].pth" - # # #retailrocket: # loadPath_SSL_meta = 
"/home/ww/Code/work3/BSTRec/Model/retailrocket/for_meta_hidden_dim_dim__8_retailrocket_2021_07_10__18_35_32_lr_0.0003_reg_0.01_batch_size_1024_gnn_layer_[16,16,16].pth" - - - - # #use less - # # parser.add_argument('--memosize', default=2, type=int, help='memory size') - # parser.add_argument('--head_num', default=4, type=int, help='head_num_of_multihead_attention') - # parser.add_argument('--beta_multi_behavior', default=0.005, type=float, help='scale of infoNCELoss') - # parser.add_argument('--sampNum_slot', default=30, type=int, help='SSL_step') - # parser.add_argument('--SSL_slot', default=1, type=int, help='SSL_step') - # parser.add_argument('--k', default=2, type=float, help='MFB') - # parser.add_argument('--meta_time_rate', default=0.8, type=float, help='gating rate') - # parser.add_argument('--meta_behavior_rate', default=0.8, type=float, help='gating rate') - # parser.add_argument('--meta_slot', default=2, type=int, help='epoch number for each SSL') - # parser.add_argument('--time_slot', default=60*60*24*360, type=float, help='length of time slots') - # parser.add_argument('--hidden_dim_meta', default=16, type=int, help='embedding size') - # # parser.add_argument('--att_head', default=2, type=int, help='number of attention heads') - # # parser.add_argument('--gnn_layer', default=2, type=int, help='number of gnn layers') - # # parser.add_argument('--trnNum', default=10000, type=int, help='number of training instances per epoch') - # # parser.add_argument('--deep_layer', default=0, type=int, help='number of deep layers to make the final prediction') - # # parser.add_argument('--iiweight', default=0.3, type=float, help='weight for ii') - # # parser.add_argument('--graphSampleN', default=10000, type=int, help='use 25000 for training and 200000 for testing, empirically') - # # parser.add_argument('--divSize', default=1000, type=int, help='div size for smallTestEpoch') - # # parser.add_argument('--tstEpoch', default=1, type=int, help='number of epoch to test while training') - # # parser.add_argument('--subUsrSize', default=10, type=int, help='number of item for each sub-user') - # # parser.add_argument('--subUsrDcy', default=0.9, type=float, help='decay factor for sub-users over time') - # # parser.add_argument('--slot', default=0.5, type=float, help='length of time slots') -# #---------IJCAI-------------------------------------------------------------------------------------------------------------- - - -# # ---------retail-------------------------------------------------------------------------------------------------------------- - #for this model - parser.add_argument('--hidden_dim', default=16, type=int, help='embedding size') - parser.add_argument('--gnn_layer', default="[16,16,16]", type=str, help='gnn layers: number + dim') - parser.add_argument('--dataset', default='retailrocket', type=str, help='name of dataset') - parser.add_argument('--point', default='for_meta_hidden_dim', type=str, help='') - parser.add_argument('--title', default='dim__8', type=str, help='title of model') - parser.add_argument('--sampNum', default=40, type=int, help='batch size for sampling') +# #for this model +# parser.add_argument('--hidden_dim', default=16, type=int, help='embedding size') +# parser.add_argument('--gnn_layer', default="[16,16,16]", type=str, help='gnn layers: number + dim') +# parser.add_argument('--dataset', default='IJCAI_15', type=str, help='name of dataset') +# parser.add_argument('--point', default='for_meta_hidden_dim', type=str, help='') +# parser.add_argument('--title', 
default='dim__8', type=str, help='title of model') +# parser.add_argument('--sampNum', default=10, type=int, help='batch size for sampling') - #for train - parser.add_argument('--lr', default=3e-4, type=float, help='learning rate') - parser.add_argument('--opt_base_lr', default=1e-4, type=float, help='learning rate') - parser.add_argument('--opt_max_lr', default=1e-3, type=float, help='learning rate') - parser.add_argument('--opt_weight_decay', default=1e-4, type=float, help='weight decay regularizer') - parser.add_argument('--meta_opt_base_lr', default=1e-4, type=float, help='learning rate') - parser.add_argument('--meta_opt_max_lr', default=1e-3, type=float, help='learning rate') - parser.add_argument('--meta_opt_weight_decay', default=1e-3, type=float, help='weight decay regularizer') - parser.add_argument('--meta_lr', default=1e-3, type=float, help='_meta_learning rate') - - parser.add_argument('--batch', default=2048, type=int, help='batch size') - parser.add_argument('--meta_batch', default=128, type=int, help='batch size') - parser.add_argument('--SSL_batch', default=15, type=int, help='batch size') - parser.add_argument('--reg', default=1e-2, type=float, help='weight decay regularizer') - parser.add_argument('--beta', default=0.005, type=float, help='scale of infoNCELoss') - parser.add_argument('--epoch', default=200, type=int, help='number of epochs') - # parser.add_argument('--decay', default=0.96, type=float, help='weight decay rate') - parser.add_argument('--shoot', default=10, type=int, help='K of top k') - parser.add_argument('--inner_product_mult', default=1, type=float, help='multiplier for the result') - parser.add_argument('--drop_rate', default=0.8, type=float, help='drop_rate') - parser.add_argument('--drop_rate1', default=0.5, type=float, help='drop_rate') - parser.add_argument('--seed', type=int, default=6) - parser.add_argument('--slope', type=float, default=0.1) - parser.add_argument('--patience', type=int, default=100) - #for save and read - parser.add_argument('--path', default='/home/ww/Code/DATASET/work3_dataset/', type=str, help='data path') - parser.add_argument('--save_path', default='tem', help='file name to save model and training record') - parser.add_argument('--load_model', default=None, help='model name to load') - parser.add_argument('--target', default='buy', type=str, help='target behavior to predict on') - parser.add_argument('--isload', default=False , type=bool, help='whether load model') - parser.add_argument('--isJustTest', default=False , type=bool, help='whether load model') - parser.add_argument('--loadModelPath', default='/home/ww/Code/work3/BSTRec/Model/retailrocket/for_meta_hidden_dim_dim__8_retailrocket_2021_07_10__18_35_32_lr_0.0003_reg_0.01_batch_size_1024_gnn_layer_[16,16,16].pth', type=str, help='loadModelPath') - - - - #use less - # parser.add_argument('--memosize', default=2, type=int, help='memory size') - parser.add_argument('--head_num', default=4, type=int, help='head_num_of_multihead_attention') - parser.add_argument('--beta_multi_behavior', default=0.005, type=float, help='scale of infoNCELoss') - parser.add_argument('--sampNum_slot', default=30, type=int, help='SSL_step') - parser.add_argument('--SSL_slot', default=1, type=int, help='SSL_step') - parser.add_argument('--k', default=2, type=float, help='MFB') - parser.add_argument('--meta_time_rate', default=0.8, type=float, help='gating rate') - parser.add_argument('--meta_behavior_rate', default=0.8, type=float, help='gating rate') - parser.add_argument('--meta_slot', 
default=2, type=int, help='epoch number for each SSL') - parser.add_argument('--time_slot', default=60*60*24*360, type=float, help='length of time slots') - parser.add_argument('--hidden_dim_meta', default=16, type=int, help='embedding size') - # parser.add_argument('--att_head', default=2, type=int, help='number of attention heads') - # parser.add_argument('--gnn_layer', default=2, type=int, help='number of gnn layers') - # parser.add_argument('--trnNum', default=10000, type=int, help='number of training instances per epoch') - # parser.add_argument('--deep_layer', default=0, type=int, help='number of deep layers to make the final prediction') - # parser.add_argument('--iiweight', default=0.3, type=float, help='weight for ii') - # parser.add_argument('--graphSampleN', default=10000, type=int, help='use 25000 for training and 200000 for testing, empirically') - # parser.add_argument('--divSize', default=1000, type=int, help='div size for smallTestEpoch') - # parser.add_argument('--tstEpoch', default=1, type=int, help='number of epoch to test while training') - # parser.add_argument('--subUsrSize', default=10, type=int, help='number of item for each sub-user') - # parser.add_argument('--subUsrDcy', default=0.9, type=float, help='decay factor for sub-users over time') - # parser.add_argument('--slot', default=0.5, type=float, help='length of time slots') -#---------retail-------------------------------------------------------------------------------------------------------------- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# #for train +# parser.add_argument('--lr', default=3e-4, type=float, help='learning rate') +# parser.add_argument('--opt_base_lr', default=1e-3, type=float, help='learning rate') +# parser.add_argument('--opt_max_lr', default=2e-3, type=float, help='learning rate') +# parser.add_argument('--opt_weight_decay', default=1e-4, type=float, help='weight decay regularizer') +# parser.add_argument('--meta_opt_base_lr', default=1e-4, type=float, help='learning rate') +# parser.add_argument('--meta_opt_max_lr', default=1e-3, type=float, help='learning rate') +# parser.add_argument('--meta_opt_weight_decay', default=1e-4, type=float, help='weight decay regularizer') +# parser.add_argument('--meta_lr', default=1e-3, type=float, help='_meta_learning rate') + +# parser.add_argument('--batch', default=8192, type=int, help='batch size') +# parser.add_argument('--meta_batch', default=128, type=int, help='batch size') +# parser.add_argument('--SSL_batch', default=30, type=int, help='batch size') +# parser.add_argument('--reg', default=1e-3, type=float, help='weight decay regularizer') +# parser.add_argument('--beta', default=0.005, type=float, help='scale of infoNCELoss') +# parser.add_argument('--epoch', default=300, type=int, help='number of epochs') +# # parser.add_argument('--decay', default=0.96, type=float, help='weight decay rate') +# parser.add_argument('--shoot', default=10, type=int, help='K of top k') +# parser.add_argument('--inner_product_mult', default=1, type=float, help='multiplier for the result') +# parser.add_argument('--drop_rate', default=0.8, type=float, help='drop_rate') +# parser.add_argument('--drop_rate1', default=0.5, type=float, help='drop_rate') +# parser.add_argument('--seed', type=int, default=6) +# parser.add_argument('--slope', type=float, default=0.1) +# parser.add_argument('--patience', type=int, default=100) +# #for save and read +# parser.add_argument('--path', default='/home/ww/Code/MultiBehavior_BASELINE/MB-GCN/Datasets/', type=str, 
help='data path') +# parser.add_argument('--save_path', default='tem', help='file name to save model and training record') +# parser.add_argument('--load_model', default=None, help='model name to load') +# parser.add_argument('--target', default='buy', type=str, help='target behavior to predict on') +# parser.add_argument('--isload', default=False , type=bool, help='whether load model') +# parser.add_argument('--isJustTest', default=False , type=bool, help='whether load model') +# parser.add_argument('--loadModelPath', default='/home/ww/Code/work3/BSTRec/Model/IJCAI_15/for_meta_hidden_dim_dim__8_IJCAI_15_2021_07_10__14_11_55_lr_0.0003_reg_0.001_batch_size_4096_gnn_layer_[16,16,16].pth', type=str, help='loadModelPath') +# # #Tmall: # loadPath_SSL_meta = "/home/ww/Code/work3/BSTRec/Model/Tmall/for_meta_hidden_dim_dim__8_Tmall_2021_07_08__01_35_54_lr_0.0003_reg_0.001_batch_size_4096_gnn_layer_[16,16,16].pth" +# # #IJCAI_15: # loadPath_SSL_meta = "/home/ww/Code/work3/BSTRec/Model/IJCAI_15/for_meta_hidden_dim_dim__8_IJCAI_15_2021_07_10__14_11_55_lr_0.0003_reg_0.001_batch_size_4096_gnn_layer_[16,16,16].pth" +# # #retailrocket: # loadPath_SSL_meta = "/home/ww/Code/work3/BSTRec/Model/retailrocket/for_meta_hidden_dim_dim__8_retailrocket_2021_07_10__18_35_32_lr_0.0003_reg_0.01_batch_size_1024_gnn_layer_[16,16,16].pth" + + + +# #use less +# # parser.add_argument('--memosize', default=2, type=int, help='memory size') +# parser.add_argument('--head_num', default=4, type=int, help='head_num_of_multihead_attention') +# parser.add_argument('--beta_multi_behavior', default=0.005, type=float, help='scale of infoNCELoss') +# parser.add_argument('--sampNum_slot', default=30, type=int, help='SSL_step') +# parser.add_argument('--SSL_slot', default=1, type=int, help='SSL_step') +# parser.add_argument('--k', default=2, type=float, help='MFB') +# parser.add_argument('--meta_time_rate', default=0.8, type=float, help='gating rate') +# parser.add_argument('--meta_behavior_rate', default=0.8, type=float, help='gating rate') +# parser.add_argument('--meta_slot', default=2, type=int, help='epoch number for each SSL') +# parser.add_argument('--time_slot', default=60*60*24*360, type=float, help='length of time slots') +# parser.add_argument('--hidden_dim_meta', default=16, type=int, help='embedding size') +# # parser.add_argument('--att_head', default=2, type=int, help='number of attention heads') +# # parser.add_argument('--gnn_layer', default=2, type=int, help='number of gnn layers') +# # parser.add_argument('--trnNum', default=10000, type=int, help='number of training instances per epoch') +# # parser.add_argument('--deep_layer', default=0, type=int, help='number of deep layers to make the final prediction') +# # parser.add_argument('--iiweight', default=0.3, type=float, help='weight for ii') +# # parser.add_argument('--graphSampleN', default=10000, type=int, help='use 25000 for training and 200000 for testing, empirically') +# # parser.add_argument('--divSize', default=1000, type=int, help='div size for smallTestEpoch') +# # parser.add_argument('--tstEpoch', default=1, type=int, help='number of epoch to test while training') +# # parser.add_argument('--subUsrSize', default=10, type=int, help='number of item for each sub-user') +# # parser.add_argument('--subUsrDcy', default=0.9, type=float, help='decay factor for sub-users over time') +# # parser.add_argument('--slot', default=0.5, type=float, help='length of time slots') return parser.parse_args() args = parse_args() -# TODO: these lines are commented out, but they are used later.
The question is: can attributes still be added to args directly like this later??? +# # args.user = 805506#147894 # args.item = 584050#99037 # ML10M diff --git a/README.md b/README.md index 5c3c025..955237d 100644 --- a/README.md +++ b/README.md @@ -1,26 +1,90 @@ # CML -The implementation of Contrastive Meta Learning with Behavior Multiplicity for Recommendation + -mkdir /History -mkdir /Model +This repository contains the PyTorch code and datasets for the paper: -### Requirements -numpy==1.20.3 +> Wei, Wei and Huang, Chao and Xia, Lianghao and Xu, Yong and Zhao, Jiashu and Yin, Dawei. Contrastive Meta Learning with Behavior Multiplicity for Recommendation. Paper on arXiv. -scipy==1.6.2 -torch==1.8.1+cu111 +## Introduction +Contrastive Meta Learning (CML) leverages a multi-behavior learning paradigm to model diverse and multiplex user-item relationships and to tackle the label scarcity problem for target behaviors. The designed multi-behavior contrastive task captures transferable user-item relationships from heterogeneous, multi-typed user behavior data. The proposed meta contrastive encoding scheme allows CML to preserve personalized multi-behavior characteristics, so that diverse behavior-aware user preferences are reflected under a customized self-supervised framework. -tqdm==4.61.2 +## Citation +``` +@inproceedings{wei2022contrastive, + title={Contrastive meta learning with behavior multiplicity for recommendation}, + author={Wei, Wei and Huang, Chao and Xia, Lianghao and Xu, Yong and Zhao, Jiashu and Yin, Dawei}, + booktitle={Proceedings of the Fifteenth ACM International Conference on Web Search and Data Mining}, + pages={1120--1128}, + year={2022} +} +``` + + +## Environment + +The code of CML is implemented and tested under the following development environment: + +- Python 3.6 +- torch==1.8.1+cu111 +- scipy==1.6.2 +- tqdm==4.61.2 + + + +## Datasets + +#### Raw data: +- IJCAI contest: https://tianchi.aliyun.com/dataset/dataDetail?dataId=47 +- Retail Rocket: https://www.kaggle.com/retailrocket/ecommerce-dataset +- Tmall: https://tianchi.aliyun.com/dataset/dataDetail?dataId=649 +#### Processed data: +- The processed IJCAI data are under the /datasets folder. + + +## Usage + +The commands to train CML on the Tmall/IJCAI/Retailrocket datasets are as follows. They specify the hyperparameter settings that generate the results reported in the paper. + +* Tmall +``` +python main.py --path=./datasets/ --dataset=Tmall --opt_base_lr=1e-3 --opt_max_lr=5e-3 --opt_weight_decay=1e-4 --meta_opt_base_lr=1e-4 --meta_opt_max_lr=2e-3 --meta_opt_weight_decay=1e-4 --meta_lr=1e-3 --batch=8192 --meta_batch=128 --SSL_batch=18 +``` +* IJCAI +``` +python main.py --path=./datasets/ --dataset=IJCAI_15 --sampNum=10 --opt_base_lr=1e-3 --opt_max_lr=2e-3 --opt_weight_decay=1e-4 --meta_opt_base_lr=1e-4 --meta_opt_max_lr=1e-3 --meta_opt_weight_decay=1e-4 --meta_lr=1e-3 --batch=8192 --meta_batch=128 --SSL_batch=30 +``` +* Retailrocket +``` +python main.py --path=./datasets/ --dataset=retailrocket --sampNum=40 --lr=3e-4 --opt_base_lr=1e-4 --opt_max_lr=1e-3 --opt_weight_decay=1e-4 --meta_opt_base_lr=1e-4 --meta_opt_max_lr=1e-3 --meta_opt_weight_decay=1e-3 --meta_lr=1e-3 --batch=2048 --meta_batch=128 --SSL_batch=15 +``` + + + + + + + + + + + + + + + +An optimized version of the code will be released again in a few days.
-### Datasets -IJCAI contest: https://tianchi.aliyun.com/dataset/dataDetail?dataId=47 -Retail Rocket: https://www.kaggle.com/retailrocket/ecommerce-dataset -Tmall: https://tianchi.aliyun.com/dataset/dataDetail?dataId=649 diff --git a/Utils/README.md b/Utils/README.md new file mode 100644 index 0000000..9c558e3 --- /dev/null +++ b/Utils/README.md @@ -0,0 +1 @@ +. diff --git a/__pycache__/BGNN.cpython-38.pyc b/__pycache__/BGNN.cpython-38.pyc deleted file mode 100644 index f7197be..0000000 Binary files a/__pycache__/BGNN.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/BSTRec.cpython-38.pyc b/__pycache__/BSTRec.cpython-38.pyc deleted file mode 100644 index 6e91ec7..0000000 Binary files a/__pycache__/BSTRec.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/BSTRec_beh.cpython-38.pyc b/__pycache__/BSTRec_beh.cpython-38.pyc deleted file mode 100644 index a7988fc..0000000 Binary files a/__pycache__/BSTRec_beh.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/DataHandler.cpython-37.pyc b/__pycache__/DataHandler.cpython-37.pyc deleted file mode 100644 index 36e4915..0000000 Binary files a/__pycache__/DataHandler.cpython-37.pyc and /dev/null differ diff --git a/__pycache__/DataHandler.cpython-38.pyc b/__pycache__/DataHandler.cpython-38.pyc deleted file mode 100644 index ae1a3db..0000000 Binary files a/__pycache__/DataHandler.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/MB_GCN.cpython-38.pyc b/__pycache__/MB_GCN.cpython-38.pyc deleted file mode 100644 index f7c2858..0000000 Binary files a/__pycache__/MB_GCN.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/MB_GCN_test.cpython-38.pyc b/__pycache__/MB_GCN_test.cpython-38.pyc deleted file mode 100644 index 2278fbe..0000000 Binary files a/__pycache__/MB_GCN_test.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/MB_GCN_test_behavior.cpython-38.pyc b/__pycache__/MB_GCN_test_behavior.cpython-38.pyc deleted file mode 100644 index 400b054..0000000 Binary files a/__pycache__/MB_GCN_test_behavior.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/MB_GCN_test_behavior_no_RNN.cpython-38.pyc b/__pycache__/MB_GCN_test_behavior_no_RNN.cpython-38.pyc deleted file mode 100644 index f59c21a..0000000 Binary files a/__pycache__/MB_GCN_test_behavior_no_RNN.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/MB_GCN_test_behavior_normal_RNN.cpython-38.pyc b/__pycache__/MB_GCN_test_behavior_normal_RNN.cpython-38.pyc deleted file mode 100644 index 044c3c7..0000000 Binary files a/__pycache__/MB_GCN_test_behavior_normal_RNN.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/MB_GCN_test_behavior_self_attention_knowledge.cpython-38.pyc b/__pycache__/MB_GCN_test_behavior_self_attention_knowledge.cpython-38.pyc deleted file mode 100644 index cdb88d7..0000000 Binary files a/__pycache__/MB_GCN_test_behavior_self_attention_knowledge.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/MV_Net.cpython-38.pyc b/__pycache__/MV_Net.cpython-38.pyc deleted file mode 100644 index 0c1a887..0000000 Binary files a/__pycache__/MV_Net.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/MV_Net_beh.cpython-38.pyc b/__pycache__/MV_Net_beh.cpython-38.pyc deleted file mode 100644 index 2a32d9d..0000000 Binary files a/__pycache__/MV_Net_beh.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/MV_Net_one_weights.cpython-38.pyc b/__pycache__/MV_Net_one_weights.cpython-38.pyc deleted file mode 100644 index 40dd623..0000000 Binary files a/__pycache__/MV_Net_one_weights.cpython-38.pyc 
and /dev/null differ diff --git a/__pycache__/NMTR.cpython-38.pyc b/__pycache__/NMTR.cpython-38.pyc deleted file mode 100644 index 65acc1b..0000000 Binary files a/__pycache__/NMTR.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/Params.cpython-37.pyc b/__pycache__/Params.cpython-37.pyc deleted file mode 100644 index 88f2180..0000000 Binary files a/__pycache__/Params.cpython-37.pyc and /dev/null differ diff --git a/__pycache__/Params.cpython-38.pyc b/__pycache__/Params.cpython-38.pyc deleted file mode 100644 index ad1c836..0000000 Binary files a/__pycache__/Params.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/cell.cpython-38.pyc b/__pycache__/cell.cpython-38.pyc deleted file mode 100644 index c49a428..0000000 Binary files a/__pycache__/cell.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/cell_self_attention_knowledge.cpython-38.pyc b/__pycache__/cell_self_attention_knowledge.cpython-38.pyc deleted file mode 100644 index cf7d1f3..0000000 Binary files a/__pycache__/cell_self_attention_knowledge.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/evaluate.cpython-37.pyc b/__pycache__/evaluate.cpython-37.pyc deleted file mode 100644 index 81cb4e4..0000000 Binary files a/__pycache__/evaluate.cpython-37.pyc and /dev/null differ diff --git a/__pycache__/evaluate.cpython-38.pyc b/__pycache__/evaluate.cpython-38.pyc deleted file mode 100644 index 5f21466..0000000 Binary files a/__pycache__/evaluate.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/graph_utils.cpython-38.pyc b/__pycache__/graph_utils.cpython-38.pyc deleted file mode 100644 index 72e5403..0000000 Binary files a/__pycache__/graph_utils.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/graph_utils_gcn.cpython-38.pyc b/__pycache__/graph_utils_gcn.cpython-38.pyc deleted file mode 100644 index eb899b7..0000000 Binary files a/__pycache__/graph_utils_gcn.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/graph_utils_time.cpython-38.pyc b/__pycache__/graph_utils_time.cpython-38.pyc deleted file mode 100644 index 466f6ca..0000000 Binary files a/__pycache__/graph_utils_time.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/hypergraph_utils.cpython-37.pyc b/__pycache__/hypergraph_utils.cpython-37.pyc deleted file mode 100644 index 71d3237..0000000 Binary files a/__pycache__/hypergraph_utils.cpython-37.pyc and /dev/null differ diff --git a/__pycache__/hypergraph_utils.cpython-38.pyc b/__pycache__/hypergraph_utils.cpython-38.pyc deleted file mode 100644 index e972681..0000000 Binary files a/__pycache__/hypergraph_utils.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/meta_weight_net.cpython-38.pyc b/__pycache__/meta_weight_net.cpython-38.pyc deleted file mode 100644 index 45a7801..0000000 Binary files a/__pycache__/meta_weight_net.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/meta_weight_net_single.cpython-38.pyc b/__pycache__/meta_weight_net_single.cpython-38.pyc deleted file mode 100644 index fbc26b5..0000000 Binary files a/__pycache__/meta_weight_net_single.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/meta_weight_net_trans.cpython-38.pyc b/__pycache__/meta_weight_net_trans.cpython-38.pyc deleted file mode 100644 index de19d05..0000000 Binary files a/__pycache__/meta_weight_net_trans.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/myModel.cpython-37.pyc b/__pycache__/myModel.cpython-37.pyc deleted file mode 100644 index 2126ec1..0000000 Binary files a/__pycache__/myModel.cpython-37.pyc and /dev/null 
differ diff --git a/__pycache__/myModel.cpython-38.pyc b/__pycache__/myModel.cpython-38.pyc deleted file mode 100644 index e49990e..0000000 Binary files a/__pycache__/myModel.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/myModel_NoGNN.cpython-38.pyc b/__pycache__/myModel_NoGNN.cpython-38.pyc deleted file mode 100644 index a41532f..0000000 Binary files a/__pycache__/myModel_NoGNN.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/myModel_NoGNN_mean.cpython-38.pyc b/__pycache__/myModel_NoGNN_mean.cpython-38.pyc deleted file mode 100644 index 507a618..0000000 Binary files a/__pycache__/myModel_NoGNN_mean.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/myModel_NoGNN_tmall.cpython-38.pyc b/__pycache__/myModel_NoGNN_tmall.cpython-38.pyc deleted file mode 100644 index da6dedc..0000000 Binary files a/__pycache__/myModel_NoGNN_tmall.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/myModel_RNN.cpython-38.pyc b/__pycache__/myModel_RNN.cpython-38.pyc deleted file mode 100644 index 8f93925..0000000 Binary files a/__pycache__/myModel_RNN.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/myModel_mean.cpython-38.pyc b/__pycache__/myModel_mean.cpython-38.pyc deleted file mode 100644 index 9b08824..0000000 Binary files a/__pycache__/myModel_mean.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/myModel_mean_tmall.cpython-38.pyc b/__pycache__/myModel_mean_tmall.cpython-38.pyc deleted file mode 100644 index da4580b..0000000 Binary files a/__pycache__/myModel_mean_tmall.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/myModel_meta_RNN.cpython-38.pyc b/__pycache__/myModel_meta_RNN.cpython-38.pyc deleted file mode 100644 index 38784cc..0000000 Binary files a/__pycache__/myModel_meta_RNN.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/myModel_meta_RNN_GAT.cpython-38.pyc b/__pycache__/myModel_meta_RNN_GAT.cpython-38.pyc deleted file mode 100644 index 2bb4a4e..0000000 Binary files a/__pycache__/myModel_meta_RNN_GAT.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/myModel_meta_RNN_GCN.cpython-38.pyc b/__pycache__/myModel_meta_RNN_GCN.cpython-38.pyc deleted file mode 100644 index 8758938..0000000 Binary files a/__pycache__/myModel_meta_RNN_GCN.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/myModel_meta_RNN_one_gru.cpython-38.pyc b/__pycache__/myModel_meta_RNN_one_gru.cpython-38.pyc deleted file mode 100644 index f22197a..0000000 Binary files a/__pycache__/myModel_meta_RNN_one_gru.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/myModel_meta_RNN_one_gru_target_auxiliary.cpython-38.pyc b/__pycache__/myModel_meta_RNN_one_gru_target_auxiliary.cpython-38.pyc deleted file mode 100644 index e93fbe2..0000000 Binary files a/__pycache__/myModel_meta_RNN_one_gru_target_auxiliary.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/myModel_multi_head.cpython-38.pyc b/__pycache__/myModel_multi_head.cpython-38.pyc deleted file mode 100644 index 4409e39..0000000 Binary files a/__pycache__/myModel_multi_head.cpython-38.pyc and /dev/null differ diff --git a/__pycache__/myModel_ori.cpython-38.pyc b/__pycache__/myModel_ori.cpython-38.pyc deleted file mode 100644 index c3fc3bd..0000000 Binary files a/__pycache__/myModel_ori.cpython-38.pyc and /dev/null differ diff --git a/data/README.md b/data/README.md new file mode 100644 index 0000000..9c558e3 --- /dev/null +++ b/data/README.md @@ -0,0 +1 @@ +. 
diff --git a/main.py b/main.py index b596baf..5ec6051 100644 --- a/main.py +++ b/main.py @@ -486,7 +486,8 @@ def trainEpoch(self): time = datetime.datetime.now() print("start_ng_samp: ", time) train_loader.dataset.ng_sample() - time = datetime.datetime.now() + + print("end_ng_samp: ", time) epoch_loss = 0
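A note on the `main.py` hunk above: with the second `time = datetime.datetime.now()` assignment removed, the `end_ng_samp` line prints the timestamp captured before `ng_sample()` runs. The sketch below is illustrative only — `timed_ng_sample` and its `train_loader` parameter are assumptions rather than the repository's API; the only calls taken from the hunk are `datetime.datetime.now()` and `train_loader.dataset.ng_sample()`.
```
import datetime

def timed_ng_sample(train_loader):
    """Minimal sketch: time the per-epoch negative-sampling step logged in trainEpoch()."""
    start = datetime.datetime.now()
    print("start_ng_samp: ", start)
    train_loader.dataset.ng_sample()   # negative sampling for this epoch
    end = datetime.datetime.now()      # re-capture so the end log differs from the start log
    print("end_ng_samp: ", end)
    return end - start
```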