# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Download and extract the MovieLens dataset from GroupLens website.

Download the dataset, and perform basic preprocessing.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import tempfile
import zipfile

# pylint: disable=g-bad-import-order
import numpy as np
import pandas as pd
import six
from six.moves import urllib  # pylint: disable=redefined-builtin
from absl import app as absl_app
from absl import flags
from absl import logging
import tensorflow as tf
# pylint: enable=g-bad-import-order

from official.utils.flags import core as flags_core


A. Unique TensorFlower's avatar
A. Unique TensorFlower committed
43

44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
ML_1M = "ml-1m"
ML_20M = "ml-20m"
DATASETS = [ML_1M, ML_20M]

RATINGS_FILE = "ratings.csv"
MOVIES_FILE = "movies.csv"

# URL to download dataset
_DATA_URL = "http://files.grouplens.org/datasets/movielens/"

GENRE_COLUMN = "genres"
ITEM_COLUMN = "item_id"  # movies
RATING_COLUMN = "rating"
TIMESTAMP_COLUMN = "timestamp"
TITLE_COLUMN = "titles"
USER_COLUMN = "user_id"

GENRES = [
    'Action', 'Adventure', 'Animation', "Children", 'Comedy', 'Crime',
    'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', "IMAX", 'Musical',
    'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western'
]
N_GENRE = len(GENRES)

RATING_COLUMNS = [USER_COLUMN, ITEM_COLUMN, RATING_COLUMN, TIMESTAMP_COLUMN]
MOVIE_COLUMNS = [ITEM_COLUMN, TITLE_COLUMN, GENRE_COLUMN]

# Note: Users are indexed [1, k], not [0, k-1]
NUM_USER_IDS = {
    ML_1M: 6040,
    ML_20M: 138493,
}

77
# Note: Movies are indexed [1, k], not [0, k-1]
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
# Both the 1m and 20m datasets use the same movie set.
NUM_ITEM_IDS = 3952

MAX_RATING = 5

NUM_RATINGS = {
    ML_1M: 1000209,
    ML_20M: 20000263
}


def _download_and_clean(dataset, data_dir):
  """Download a MovieLens dataset and coerce it into the standard layout.

  After this step the only differences between the ml-1m and ml-20m datasets
  (other than size) are that the 1m dataset uses whole-number ratings while
  the 20m dataset allows half-integer ratings. Both end up as ratings.csv and
  movies.csv with identical column names under ``data_dir/<dataset>``.
  """
  if dataset not in DATASETS:
    raise ValueError("dataset {} is not in {{{}}}".format(
        dataset, ",".join(DATASETS)))

  destination = os.path.join(data_dir, dataset)

  expected_files = ["{}.zip".format(dataset), RATINGS_FILE, MOVIES_FILE]

  tf.io.gfile.makedirs(destination)
  # Skip the network round trip entirely when every artifact already exists.
  if set(expected_files).issubset(tf.io.gfile.listdir(destination)):
    logging.info("Dataset {} has already been downloaded".format(dataset))
    return

  url = "{}{}.zip".format(_DATA_URL, dataset)

  scratch_dir = tempfile.mkdtemp()
  try:
    archive_path = os.path.join(scratch_dir, "{}.zip".format(dataset))
    archive_path, _ = urllib.request.urlretrieve(url, archive_path)
    statinfo = os.stat(archive_path)
    # A new line to clear the carriage return from download progress
    # logging.info is not applicable here
    print()
    logging.info(
        "Successfully downloaded {} {} bytes".format(
            archive_path, statinfo.st_size))

    zipfile.ZipFile(archive_path, "r").extractall(scratch_dir)

    # Normalize the raw files into ratings.csv / movies.csv in scratch_dir.
    if dataset == ML_1M:
      _regularize_1m_dataset(scratch_dir)
    else:
      _regularize_20m_dataset(scratch_dir)

    # Copy the cleaned artifacts into the destination without clobbering any
    # file that is already there.
    for name in tf.io.gfile.listdir(scratch_dir):
      if tf.io.gfile.exists(os.path.join(destination, name)):
        logging.info("Skipping copy of {}, as it already exists in the "
                     "destination folder.".format(name))
      else:
        tf.io.gfile.copy(os.path.join(scratch_dir, name),
                         os.path.join(destination, name))

  finally:
    tf.io.gfile.rmtree(scratch_dir)


def _transform_csv(input_path, output_path, names, skip_first, separator=","):
  """Rewrite a raw MovieLens csv into the regularized comma-separated form.

  Args:
    input_path: The path of the raw csv.
    output_path: The path of the cleaned csv.
    names: The csv column names.
    skip_first: Boolean of whether to skip the first line of the raw csv.
    separator: Character used to separate fields in the raw csv.
  """
  if six.PY2:
    names = [six.ensure_text(n, "utf-8") for n in names]

  with tf.io.gfile.GFile(output_path, "wb") as sink, \
      tf.io.gfile.GFile(input_path, "rb") as source:

    # Emit the canonical header row first.
    sink.write(",".join(names).encode("utf-8"))
    sink.write(b"\n")
    for index, raw_line in enumerate(source):
      if skip_first and index == 0:
        continue  # ignore existing labels in the csv

      text = six.ensure_text(raw_line, "utf-8", errors="ignore")
      fields = text.split(separator)
      if separator != ",":
        # When re-delimiting with commas, quote any field whose embedded
        # comma would otherwise split it into two columns.
        fields = ['"{}"'.format(field) if "," in field else field
                  for field in fields]
      sink.write(",".join(fields).encode("utf-8"))


def _regularize_1m_dataset(temp_dir):
  """Convert the extracted ml-1m files into the standard csv layout.

  ratings.dat
    The file has no header row, and each line is in the following format:
    UserID::MovieID::Rating::Timestamp
      - UserIDs range from 1 and 6040
      - MovieIDs range from 1 and 3952
      - Ratings are made on a 5-star scale (whole-star ratings only)
      - Timestamp is represented in seconds since midnight Coordinated
        Universal Time (UTC) of January 1, 1970.
      - Each user has at least 20 ratings

  movies.dat
    Each line has the following format:
    MovieID::Title::Genres
      - MovieIDs range from 1 and 3952
  """
  working_dir = os.path.join(temp_dir, ML_1M)

  # Both 1m files are "::"-delimited and carry no header row.
  conversions = (
      ("ratings.dat", RATINGS_FILE, RATING_COLUMNS),
      ("movies.dat", MOVIES_FILE, MOVIE_COLUMNS),
  )
  for raw_name, clean_name, columns in conversions:
    _transform_csv(
        input_path=os.path.join(working_dir, raw_name),
        output_path=os.path.join(temp_dir, clean_name),
        names=columns, skip_first=False, separator="::")

  tf.io.gfile.rmtree(working_dir)


def _regularize_20m_dataset(temp_dir):
  """Convert the extracted ml-20m files into the standard csv layout.

  ratings.csv
    Each line of this file after the header row represents one rating of one
    movie by one user, and has the following format:
    userId,movieId,rating,timestamp
    - The lines within this file are ordered first by userId, then, within
      user, by movieId.
    - Ratings are made on a 5-star scale, with half-star increments
      (0.5 stars - 5.0 stars).
    - Timestamps represent seconds since midnight Coordinated Universal Time
      (UTC) of January 1, 1970.
    - All the users had rated at least 20 movies.

  movies.csv
    Each line has the following format:
    MovieID,Title,Genres
      - MovieIDs range from 1 and 3952
  """
  working_dir = os.path.join(temp_dir, ML_20M)

  # The 20m files are already comma-delimited but carry a header row that
  # must be skipped so the canonical column names can be substituted.
  conversions = (
      ("ratings.csv", RATINGS_FILE, RATING_COLUMNS),
      ("movies.csv", MOVIES_FILE, MOVIE_COLUMNS),
  )
  for raw_name, clean_name, columns in conversions:
    _transform_csv(
        input_path=os.path.join(working_dir, raw_name),
        output_path=os.path.join(temp_dir, clean_name),
        names=columns, skip_first=True, separator=",")

  tf.io.gfile.rmtree(working_dir)


def download(dataset, data_dir):
  """Download and regularize one dataset, or all datasets if none is named.

  Args:
    dataset: One of DATASETS, or a falsy value to download every dataset.
    data_dir: Directory under which the cleaned data is placed.
  """
  if dataset:
    _download_and_clean(dataset, data_dir)
  else:
    # A plain loop, not a discarded list comprehension: the iteration exists
    # only for its side effects.
    for d in DATASETS:
      _download_and_clean(d, data_dir)


def ratings_csv_to_dataframe(data_dir, dataset):
  """Load the cleaned ratings csv for `dataset` into a pandas DataFrame."""
  ratings_path = os.path.join(data_dir, dataset, RATINGS_FILE)
  with tf.io.gfile.GFile(ratings_path) as handle:
    return pd.read_csv(handle, encoding="utf-8")


def csv_to_joint_dataframe(data_dir, dataset):
  """Join ratings with movie metadata into one DataFrame.

  Returns a DataFrame with one row per rating, augmented with the rated
  movie's title and genre columns; the rating column is cast to float32.
  """
  ratings = ratings_csv_to_dataframe(data_dir, dataset)

  movies_path = os.path.join(data_dir, dataset, MOVIES_FILE)
  with tf.io.gfile.GFile(movies_path) as handle:
    movies = pd.read_csv(handle, encoding="utf-8")

  joined = ratings.merge(movies, on=ITEM_COLUMN)
  joined[RATING_COLUMN] = joined[RATING_COLUMN].astype(np.float32)

  return joined


def integerize_genres(dataframe):
  """Replace genre string with a binary vector.

  Args:
    dataframe: a pandas dataframe of movie data.

  Returns:
    The transformed dataframe.
  """
  def _map_fn(entry):
    # str.replace returns a new string rather than mutating in place, so the
    # result must be rebound. (The 1m dataset spells the genre "Children's"
    # while GENRES uses "Children"; without this fix that genre bit was
    # silently never set.)
    entry = entry.replace("Children's", "Children")  # naming difference.
    movie_genres = entry.split("|")
    output = np.zeros((len(GENRES),), dtype=np.int64)
    for i, genre in enumerate(GENRES):
      if genre in movie_genres:
        output[i] = 1
    return output

  dataframe[GENRE_COLUMN] = dataframe[GENRE_COLUMN].apply(_map_fn)

  return dataframe


def define_data_download_flags():
  """Add flags specifying data download arguments."""
  # Destination directory for both the downloaded archives and cleaned csvs.
  flags.DEFINE_string(
      default="/tmp/movielens-data/", name="data_dir",
      help=flags_core.help_wrap(
          "Directory to download and extract data."))

  # Which dataset to fetch; leaving it unset downloads every known dataset.
  flags.DEFINE_enum(
      default=None, name="dataset",
      enum_values=DATASETS, case_sensitive=False,
      help=flags_core.help_wrap("Dataset to be trained and evaluated."))


def main(_):
  """Download and extract the data from GroupLens website."""
  parsed_flags = flags.FLAGS
  download(parsed_flags.dataset, parsed_flags.data_dir)


if __name__ == "__main__":
  # Flags must be registered before absl_app.run parses sys.argv.
  define_data_download_flags()
  FLAGS = flags.FLAGS
  absl_app.run(main)