# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Preprocess dataset and construct any necessary artifacts."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import pickle
import time
import timeit
import typing
from typing import Dict, Text, Tuple

from absl import logging
import numpy as np
import pandas as pd
import tensorflow as tf

from official.recommendation import constants as rconst
from official.recommendation import data_pipeline
from official.recommendation import movielens


_EXPECTED_CACHE_KEYS = (rconst.TRAIN_USER_KEY, rconst.TRAIN_ITEM_KEY,
                        rconst.EVAL_USER_KEY, rconst.EVAL_ITEM_KEY,
                        rconst.USER_MAP, rconst.ITEM_MAP)


def read_dataframe(
    raw_rating_path: Text
) -> Tuple[Dict[int, int], Dict[int, int], pd.DataFrame]:
  """Read in data CSV, and output DataFrame for downstream processing.

  This function reads in the raw CSV of positive items, and performs three
  preprocessing transformations:

  1)  Filter out all users who have not rated at least a certain number of
      items (typically 20).

  2)  Zero index the users and items such that the largest user_id is
      `num_users - 1` and the largest item_id is `num_items - 1`.

  3)  Sort the dataframe by user_id, with timestamp as a secondary sort key.
      This allows the dataframe to be sliced by user in-place, and for the last
      item to be selected simply by calling the `-1` index of a user's slice.

  Args:
    raw_rating_path: The path to the CSV which contains the raw dataset.

  Returns:
    A dict mapping raw user IDs to regularized user IDs, a dict mapping raw
    item IDs to regularized item IDs, and a filtered, zero-index remapped,
    sorted dataframe.
  """
  with tf.io.gfile.GFile(raw_rating_path) as f:
    df = pd.read_csv(f)

  # Keep only users who have rated at least MIN_NUM_RATINGS items.
  grouped = df.groupby(movielens.USER_COLUMN)
  df = grouped.filter(
      lambda x: len(x) >= rconst.MIN_NUM_RATINGS)  # type: pd.DataFrame

  original_users = df[movielens.USER_COLUMN].unique()
  original_items = df[movielens.ITEM_COLUMN].unique()

  # Map user and item IDs to zero-based indices for downstream processing.
  logging.info("Generating user_map and item_map...")
  user_map = {user: index for index, user in enumerate(original_users)}
  item_map = {item: index for index, item in enumerate(original_items)}

  df[movielens.USER_COLUMN] = df[movielens.USER_COLUMN].apply(
      lambda user: user_map[user])
  df[movielens.ITEM_COLUMN] = df[movielens.ITEM_COLUMN].apply(
      lambda item: item_map[item])

  num_users = len(original_users)
  num_items = len(original_items)

  assert num_users <= np.iinfo(rconst.USER_DTYPE).max
  assert num_items <= np.iinfo(rconst.ITEM_DTYPE).max
  assert df[movielens.USER_COLUMN].max() == num_users - 1
  assert df[movielens.ITEM_COLUMN].max() == num_items - 1

  # This sort is used to shard the dataframe by user, and later to select
  # the last item for a user to be used in validation.
  logging.info("Sorting by user, timestamp...")

  # This sort is equivalent to
  #   df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
  #   inplace=True)
  # except that the order of items with the same user and timestamp are
  # sometimes different. For some reason, this sort results in a better
  # hit-rate during evaluation, matching the performance of the MLPerf
  # reference implementation.
  df.sort_values(by=movielens.TIMESTAMP_COLUMN, inplace=True)
  df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
                 inplace=True,
                 kind="mergesort")

  # Filtering and sorting do not rebuild the dataframe's index, so reset it
  # before returning.
  return user_map, item_map, df.reset_index()
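

# Editorial example, not part of the original pipeline: a minimal sketch of
# the zero-indexing and two-pass stable sort performed by `read_dataframe`,
# using toy literal column names ("user_id", "item_id", "timestamp") in place
# of the movielens column constants.
def _example_zero_index_and_sort():
  """Illustrates the remap-and-sort steps above on a toy dataframe."""
  toy = pd.DataFrame({
      "user_id": [10, 10, 42, 42, 42],  # Raw, non-contiguous user IDs.
      "item_id": [7, 3, 7, 9, 3],  # Raw, non-contiguous item IDs.
      "timestamp": [5, 1, 2, 2, 4],
  })
  # Remap raw IDs to a dense zero-based range, as done above.
  user_map = {user: i for i, user in enumerate(toy["user_id"].unique())}
  item_map = {item: i for i, item in enumerate(toy["item_id"].unique())}
  toy["user_id"] = toy["user_id"].map(user_map)
  toy["item_id"] = toy["item_id"].map(item_map)
  # Two-pass sort: first by timestamp alone, then a stable mergesort by
  # (user, timestamp). Rows that tie on (user, timestamp) keep the order
  # established by the first pass, mirroring the tie-breaking above.
  toy.sort_values(by="timestamp", inplace=True)
  toy.sort_values(["user_id", "timestamp"], inplace=True, kind="mergesort")
  return user_map, item_map, toy.reset_index(drop=True)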


def _filter_index_sort(
    raw_rating_path: Text,
    cache_path: Text) -> Tuple[Dict[Text, typing.Any], bool]:
  """Read in data CSV, and output structured data.

  This function reads in the raw CSV of positive items, and performs three
  preprocessing transformations:

  1)  Filter out all users who have not rated at least a certain number of
      items (typically 20).

  2)  Zero index the users and items such that the largest user_id is
      `num_users - 1` and the largest item_id is `num_items - 1`.

  3)  Sort the dataframe by user_id, with timestamp as a secondary sort key.
      This allows the dataframe to be sliced by user in-place, and for the last
      item to be selected simply by calling the `-1` index of a user's slice.

  While all of these transformations are performed by Pandas (and are therefore
  single-threaded), they only take ~2 minutes, and the overhead of applying a
  MapReduce pattern to process the dataset in parallel adds significant
  complexity for no computational gain. For a larger dataset, parallelizing
  this preprocessing could yield speedups. (Also, this preprocessing step is
  only performed once per run.)

  Args:
    raw_rating_path: The path to the CSV which contains the raw dataset.
    cache_path: The path to the file where results of this function are saved.

  Returns:
    A tuple `(data, valid_cache)`. `data` is a dict containing the zero-indexed
    train and eval user/item arrays, the dicts mapping raw user and item IDs to
    regularized IDs, and a creation timestamp. `valid_cache` is True if `data`
    was loaded from the cache file rather than recomputed.
  """
  valid_cache = tf.io.gfile.exists(cache_path)
  if valid_cache:
    with tf.io.gfile.GFile(cache_path, "rb") as f:
      cached_data = pickle.load(f)

    # (nnigania) Disabled this check as the dataset is not expected to change.
    # cache_age = time.time() - cached_data.get("create_time", 0)
    # if cache_age > rconst.CACHE_INVALIDATION_SEC:
    #   valid_cache = False

    for key in _EXPECTED_CACHE_KEYS:
      if key not in cached_data:
        valid_cache = False

    if not valid_cache:
      logging.info("Removing stale raw data cache file.")
      tf.io.gfile.remove(cache_path)

  if valid_cache:
    data = cached_data
  else:
    user_map, item_map, df = read_dataframe(raw_rating_path)

    # Hold out each user's last (most recent) rating for evaluation; all
    # earlier ratings are kept for training (a leave-one-out split).
    grouped = df.groupby(movielens.USER_COLUMN, group_keys=False)
    eval_df, train_df = grouped.tail(1), grouped.apply(lambda x: x.iloc[:-1])

    data = {
        rconst.TRAIN_USER_KEY:
            train_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),
        rconst.TRAIN_ITEM_KEY:
            train_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),
        rconst.EVAL_USER_KEY:
            eval_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),
        rconst.EVAL_ITEM_KEY:
            eval_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),
        rconst.USER_MAP:
            user_map,
        rconst.ITEM_MAP:
            item_map,
        "create_time":
            time.time(),
    }

    logging.info("Writing raw data cache.")
    with tf.io.gfile.GFile(cache_path, "wb") as f:
      pickle.dump(data, f, protocol=4)

  # TODO(robieta): MLPerf cache clear.
  return data, valid_cache
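

# Editorial example, not part of the original pipeline: a minimal sketch of
# the leave-one-out split used above, where each user's final (most recent)
# rating becomes the eval example and the rest are kept for training. The
# literal "user_id" and "item_id" column names are assumptions standing in
# for the movielens column constants.
def _example_leave_one_out_split():
  """Illustrates the eval/train split performed in `_filter_index_sort`."""
  # Rows are assumed to be pre-sorted by (user, timestamp), as guaranteed by
  # `read_dataframe`.
  toy = pd.DataFrame({
      "user_id": [0, 0, 0, 1, 1],
      "item_id": [3, 1, 2, 0, 2],
  })
  grouped = toy.groupby("user_id", group_keys=False)
  eval_df = grouped.tail(1)  # The last row of each user.
  train_df = grouped.apply(lambda x: x.iloc[:-1])  # Everything but the last.
  return train_df, eval_df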


def instantiate_pipeline(
    dataset: Text,
    data_dir: Text,
    params: Dict[Text, typing.Any],
    constructor_type: typing.Optional[Text] = None,
    deterministic: bool = False,
    epoch_dir: typing.Optional[Text] = None,
    generate_data_offline: bool = False
) -> Tuple[int, int, data_pipeline.BaseDataConstructor]:
  """Load and digest data CSV into a usable form.

  Args:
    dataset: The name of the dataset to be used.
    data_dir: The root directory of the dataset.
    params: dict of parameters for the run.
    constructor_type: The name of the constructor subclass that should be used
      for the input pipeline.
    deterministic: Whether the data constructor should produce data
      deterministically.
    epoch_dir: Directory in which to store the training epochs.
    generate_data_offline: Boolean, whether data generation is performed
      offline or while training.

  Returns:
    num_users: The number of users in the dataset.
    num_items: The number of items in the dataset.
    producer: The data constructor for the input pipeline.
  """
  logging.info("Beginning data preprocessing.")

  st = timeit.default_timer()
  raw_rating_path = os.path.join(data_dir, dataset, movielens.RATINGS_FILE)
  cache_path = os.path.join(data_dir, dataset, rconst.RAW_CACHE_FILE)

  raw_data, _ = _filter_index_sort(raw_rating_path, cache_path)
  user_map, item_map = raw_data[rconst.USER_MAP], raw_data[rconst.ITEM_MAP]
  num_users, num_items = movielens.DATASET_TO_NUM_USERS_AND_ITEMS[dataset]

  if num_users != len(user_map):
    raise ValueError("Expected to find {} users, but found {}".format(
        num_users, len(user_map)))
  if num_items != len(item_map):
    raise ValueError("Expected to find {} items, but found {}".format(
        num_items, len(item_map)))

  producer = data_pipeline.get_constructor(constructor_type or "materialized")(
      maximum_number_epochs=params["train_epochs"],
      num_users=num_users,
      num_items=num_items,
      user_map=user_map,
      item_map=item_map,
      train_pos_users=raw_data[rconst.TRAIN_USER_KEY],
      train_pos_items=raw_data[rconst.TRAIN_ITEM_KEY],
      train_batch_size=params["batch_size"],
      batches_per_train_step=params["batches_per_step"],
      num_train_negatives=params["num_neg"],
      eval_pos_users=raw_data[rconst.EVAL_USER_KEY],
      eval_pos_items=raw_data[rconst.EVAL_ITEM_KEY],
      eval_batch_size=params["eval_batch_size"],
      batches_per_eval_step=params["batches_per_step"],
      stream_files=params["stream_files"],
      deterministic=deterministic,
      epoch_dir=epoch_dir,
      create_data_offline=generate_data_offline)

  run_time = timeit.default_timer() - st
  logging.info(
      "Data preprocessing complete. Time: {:.1f} sec.".format(run_time))

  # Print a summary of the constructed data producer.
  print(producer)
  return num_users, num_items, producer
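

# Editorial usage sketch, not part of the original module: shows how a caller
# might construct the input pipeline. The dataset name, data directory, and
# params values below are illustrative assumptions, not values prescribed by
# this file, and running this requires the raw MovieLens ratings CSV to exist
# under the given data directory.
def _example_instantiate_pipeline():
  params = {
      "train_epochs": 1,  # Assumed value.
      "batch_size": 256,  # Assumed value.
      "batches_per_step": 1,  # Assumed value.
      "num_neg": 4,  # Assumed value.
      "eval_batch_size": 1024,  # Assumed value.
      "stream_files": False,  # Assumed value.
  }
  return instantiate_pipeline(
      dataset="ml-1m",  # Hypothetical dataset name.
      data_dir="/tmp/movielens",  # Hypothetical data directory.
      params=params,
      constructor_type="materialized")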