Commit 9d0f41b7 authored by Chris Shallue, committed by Christopher Shallue

Replace '%' string formatting with .format().

PiperOrigin-RevId: 213353962
parent 313d0c41
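
For reference, the change is mechanical throughout the diff: each positional `%` conversion becomes a `{}` placeholder and the `%` operator becomes a `.format()` call on the string. A minimal sketch of the equivalence (the argument values here are illustrative, not taken from the diff):

```python
# Old style: the '%' operator interpolates a value (or tuple of values).
old_msg = "Unrecognized model name: %s" % "astro_cnn"
old_err = "Expected mode in %s. Got: %s" % (["train", "eval"], "predict")

# New style: str.format() fills '{}' placeholders positionally, calling
# str() on each argument just as '%s' did.
new_msg = "Unrecognized model name: {}".format("astro_cnn")
new_err = "Expected mode in {}. Got: {}".format(["train", "eval"], "predict")

assert old_msg == new_msg
assert old_err == new_err
```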
@@ -54,24 +54,6 @@ from astronet.astro_model import astro_model
 class AstroCNNModel(astro_model.AstroModel):
   """A model for classifying light curves using a convolutional neural net."""

-  def __init__(self, features, labels, hparams, mode):
-    """Basic setup. The actual TensorFlow graph is constructed in build().
-
-    Args:
-      features: A dictionary containing "time_series_features" and
-        "aux_features", each of which is a dictionary of named input Tensors.
-        All features have dtype float32 and shape [batch_size, length].
-      labels: An int64 Tensor with shape [batch_size]. May be None if mode is
-        tf.estimator.ModeKeys.PREDICT.
-      hparams: A ConfigDict of hyperparameters for building the model.
-      mode: A tf.estimator.ModeKeys to specify whether the graph should be built
-        for training, evaluation or prediction.
-
-    Raises:
-      ValueError: If mode is invalid.
-    """
-    super(AstroCNNModel, self).__init__(features, labels, hparams, mode)
-
   def _build_cnn_layers(self, inputs, hparams, scope="cnn"):
     """Builds convolutional layers.
@@ -95,7 +77,7 @@ class AstroCNNModel(astro_model.AstroModel):
     for i in range(hparams.cnn_num_blocks):
       num_filters = int(hparams.cnn_initial_num_filters *
                         hparams.cnn_block_filter_factor**i)
-      with tf.variable_scope("block_%d" % (i + 1)):
+      with tf.variable_scope("block_{}".format(i + 1)):
         for j in range(hparams.cnn_block_size):
           net = tf.layers.conv1d(
               inputs=net,
@@ -103,7 +85,7 @@ class AstroCNNModel(astro_model.AstroModel):
               kernel_size=int(hparams.cnn_kernel_size),
               padding=hparams.convolution_padding,
               activation=tf.nn.relu,
-              name="conv_%d" % (j + 1))
+              name="conv_{}".format(j + 1))
         if hparams.pool_size > 1:  # pool_size 0 or 1 denotes no pooling
           net = tf.layers.max_pooling1d(
...
@@ -58,24 +58,6 @@ from astronet.astro_model import astro_model
 class AstroFCModel(astro_model.AstroModel):
   """A model for classifying light curves using fully connected layers."""

-  def __init__(self, features, labels, hparams, mode):
-    """Basic setup. The actual TensorFlow graph is constructed in build().
-
-    Args:
-      features: A dictionary containing "time_series_features" and
-        "aux_features", each of which is a dictionary of named input Tensors.
-        All features have dtype float32 and shape [batch_size, length].
-      labels: An int64 Tensor with shape [batch_size]. May be None if mode is
-        tf.estimator.ModeKeys.PREDICT.
-      hparams: A ConfigDict of hyperparameters for building the model.
-      mode: A tf.estimator.ModeKeys to specify whether the graph should be built
-        for training, evaluation or prediction.
-
-    Raises:
-      ValueError: If mode is invalid.
-    """
-    super(AstroFCModel, self).__init__(features, labels, hparams, mode)
-
   def _build_local_fc_layers(self, inputs, hparams, scope):
     """Builds locally fully connected layers.
@@ -120,8 +102,8 @@ class AstroFCModel(astro_model.AstroModel):
       elif hparams.pooling_type == "avg":
         net = tf.reduce_mean(net, axis=1, name="avg_pool")
       else:
-        raise ValueError(
-            "Unrecognized pooling_type: %s" % hparams.pooling_type)
+        raise ValueError("Unrecognized pooling_type: {}".format(
+            hparams.pooling_type))
       remaining_layers = hparams.num_local_layers - 1
     else:
@@ -133,7 +115,7 @@ class AstroFCModel(astro_model.AstroModel):
           inputs=net,
           num_outputs=hparams.local_layer_size,
           activation_fn=tf.nn.relu,
-          scope="fully_connected_%d" % (i + 1))
+          scope="fully_connected_{}".format(i + 1))
       if hparams.dropout_rate > 0:
         net = tf.layers.dropout(
...
@@ -93,7 +93,7 @@ class AstroModel(object):
         tf.estimator.ModeKeys.PREDICT
     ]
     if mode not in valid_modes:
-      raise ValueError("Expected mode in %s. Got: %s" % (valid_modes, mode))
+      raise ValueError("Expected mode in {}. Got: {}".format(valid_modes, mode))

     self.hparams = hparams
     self.mode = mode
@@ -213,7 +213,7 @@ class AstroModel(object):
           inputs=net,
           units=self.hparams.pre_logits_hidden_layer_size,
           activation=tf.nn.relu,
-          name="fully_connected_%s" % (i + 1))
+          name="fully_connected_{}".format(i + 1))
       if self.hparams.pre_logits_dropout_rate > 0:
         net = tf.layers.dropout(
...
@@ -100,7 +100,7 @@ parser.add_argument(
     required=True,
     help="CSV file containing the Q1-Q17 DR24 Kepler TCE table. Must contain "
     "columns: rowid, kepid, tce_plnt_num, tce_period, tce_duration, "
-    "tce_time0bk. Download from: %s" % _DR24_TCE_URL)
+    "tce_time0bk. Download from: {}".format(_DR24_TCE_URL))

parser.add_argument(
    "--kepler_data_dir",
@@ -219,8 +219,10 @@ def main(argv):
   for i in range(FLAGS.num_train_shards):
     start = boundaries[i]
     end = boundaries[i + 1]
-    file_shards.append((train_tces[start:end], os.path.join(
-        FLAGS.output_dir, "train-%.5d-of-%.5d" % (i, FLAGS.num_train_shards))))
+    filename = os.path.join(
+        FLAGS.output_dir, "train-{:05d}-of-{:05d}".format(
+            i, FLAGS.num_train_shards))
+    file_shards.append((train_tces[start:end], filename))

   # Validation and test sets each have a single shard.
   file_shards.append((val_tces, os.path.join(FLAGS.output_dir,
...
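
One point worth noting in the hunk above: the old `%.5d` conversion and the new `{:05d}` spec both zero-pad an integer to width 5, so the generated shard filenames are unchanged by this refactor. A quick sketch with made-up shard counts:

```python
import os

i, num_train_shards = 3, 8  # illustrative values only

# '%.5d' (old) and '{:05d}' (new) produce byte-identical zero-padded names.
old_name = "train-%.5d-of-%.5d" % (i, num_train_shards)
new_name = "train-{:05d}-of-{:05d}".format(i, num_train_shards)
assert old_name == new_name == "train-00003-of-00008"

print(os.path.join("/tmp/output", new_name))  # /tmp/output/train-00003-of-00008
```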
@@ -47,8 +47,8 @@ def read_light_curve(kepid, kepler_data_dir):
   # Read the Kepler light curve.
   file_names = kepler_io.kepler_filenames(kepler_data_dir, kepid)
   if not file_names:
-    raise IOError("Failed to find .fits files in %s for Kepler ID %s" %
-                  (kepler_data_dir, kepid))
+    raise IOError("Failed to find .fits files in {} for Kepler ID {}".format(
+        kepler_data_dir, kepid))

   return kepler_io.read_kepler_light_curve(file_names)
...
@@ -46,7 +46,7 @@ def get_model_class(model_name):
     ValueError: If model_name is unrecognized.
   """
   if model_name not in _MODELS:
-    raise ValueError("Unrecognized model name: %s" % model_name)
+    raise ValueError("Unrecognized model name: {}".format(model_name))

   return _MODELS[model_name][0]
@@ -67,11 +67,12 @@ def get_model_config(model_name, config_name):
     ValueError: If model_name or config_name is unrecognized.
   """
   if model_name not in _MODELS:
-    raise ValueError("Unrecognized model name: %s" % model_name)
+    raise ValueError("Unrecognized model name: {}".format(model_name))

   config_module = _MODELS[model_name][1]
   try:
     return getattr(config_module, config_name)()
   except AttributeError:
-    raise ValueError("Config name '%s' not found in configuration module: %s" %
-                     (config_name, config_module.__name__))
+    raise ValueError(
+        "Config name '{}' not found in configuration module: {}".format(
+            config_name, config_module.__name__))
@@ -69,7 +69,7 @@ def _recursive_pad_to_batch_size(tensor_or_collection, batch_size):
         for t in tensor_or_collection
     ]

-  raise ValueError("Unknown input type: %s" % tensor_or_collection)
+  raise ValueError("Unknown input type: {}".format(tensor_or_collection))


def pad_dataset_to_batch_size(dataset, batch_size):
@@ -119,7 +119,7 @@ def _recursive_set_batch_size(tensor_or_collection, batch_size):
     for t in tensor_or_collection:
       _recursive_set_batch_size(t, batch_size)
   else:
-    raise ValueError("Unknown input type: %s" % tensor_or_collection)
+    raise ValueError("Unknown input type: {}".format(tensor_or_collection))

   return tensor_or_collection
@@ -170,7 +170,7 @@ def build_dataset(file_pattern,
   for p in file_patterns:
     matches = tf.gfile.Glob(p)
     if not matches:
-      raise ValueError("Found no input files matching %s" % p)
+      raise ValueError("Found no input files matching {}".format(p))
     filenames.extend(matches)
   tf.logging.info("Building input pipeline from %d files matching patterns: %s",
                   len(filenames), file_patterns)
@@ -180,8 +180,8 @@ def build_dataset(file_pattern,
     label_ids = set(input_config.label_map.values())
     if label_ids != set(range(len(label_ids))):
       raise ValueError(
-          "Label IDs must be contiguous integers starting at 0. Got: %s" %
-          label_ids)
+          "Label IDs must be contiguous integers starting at 0. Got: {}".format(
+              label_ids))

     # Create a HashTable mapping label strings to integer ids.
     table_initializer = tf.contrib.lookup.KeyValueTensorInitializer(
...
@@ -74,7 +74,7 @@ def create_optimizer(hparams, learning_rate, use_tpu=False):
   elif optimizer_name == "rmsprop":
     optimizer = tf.RMSPropOptimizer(learning_rate)
   else:
-    raise ValueError("Unknown optimizer: %s" % hparams.optimizer)
+    raise ValueError("Unknown optimizer: {}".format(hparams.optimizer))

   if use_tpu:
     optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
...
@@ -49,14 +49,15 @@ def parse_json(json_string_or_file):
       with tf.gfile.Open(json_string_or_file) as f:
         json_dict = json.load(f)
     except ValueError as json_file_parsing_error:
-      raise ValueError("Unable to parse the content of the json file %s. "
-                       "Parsing error: %s." % (json_string_or_file,
-                                               json_file_parsing_error.message))
+      raise ValueError("Unable to parse the content of the json file {}. "
+                       "Parsing error: {}.".format(
+                           json_string_or_file,
+                           json_file_parsing_error.message))
     except tf.gfile.FileError:
       message = ("Unable to parse the input parameter neither as literal "
                  "JSON nor as the name of a file that exists.\n"
-                 "JSON parsing error: %s\n\n Input parameter:\n%s." %
-                 (literal_json_parsing_error.message, json_string_or_file))
+                 "JSON parsing error: {}\n\n Input parameter:\n{}.".format(
+                     literal_json_parsing_error.message, json_string_or_file))
       raise ValueError(message)

   return json_dict
...
@@ -132,8 +132,8 @@ class _ModelFn(object):
     if "labels" in features:
       if labels is not None and labels is not features["labels"]:
         raise ValueError(
-            "Conflicting labels: features['labels'] = %s, labels = %s" %
-            (features["labels"], labels))
+            "Conflicting labels: features['labels'] = {}, labels = {}".format(
+                features["labels"], labels))
       labels = features.pop("labels")

     model = self._model_class(features, labels, hparams, mode)
...
@@ -48,7 +48,8 @@ def get_feature(ex, name, kind=None, strict=True):
     return np.array([])  # Feature exists, but it's empty.

   if kind and kind != inferred_kind:
-    raise TypeError("Requested %s, but Feature has %s" % (kind, inferred_kind))
+    raise TypeError("Requested {}, but Feature has {}".format(
+        kind, inferred_kind))

   return np.array(getattr(ex.features.feature[name], inferred_kind).value)
@@ -105,7 +106,8 @@ def set_feature(ex,
       del ex.features.feature[name]
     else:
       raise ValueError(
-          "Attempting to set duplicate feature with name: %s" % name)
+          "Attempting to overwrite feature with name: {}. "
+          "Set allow_overwrite=True if this is desired.".format(name))

   if not kind:
     kind = _infer_kind(value)
@@ -117,7 +119,7 @@ def set_feature(ex,
   elif kind == "int64_list":
     value = [int(v) for v in value]
   else:
-    raise ValueError("Unrecognized kind: %s" % kind)
+    raise ValueError("Unrecognized kind: {}".format(kind))

   getattr(ex.features.feature[name], kind).value.extend(value)
...
@@ -24,7 +24,8 @@ def ValueErrorOnFalse(ok, *output_args):
   """Raises ValueError if not ok, otherwise returns the output arguments."""
   n_outputs = len(output_args)
   if n_outputs < 2:
-    raise ValueError("Expected 2 or more output_args. Got: %d" % n_outputs)
+    raise ValueError(
+        "Expected 2 or more output_args. Got: {}".format(n_outputs))

   if not ok:
     error = output_args[-1]
...
@@ -119,7 +119,7 @@ def kepler_filenames(base_dir,
     A list of filenames.
   """
   # Pad the Kepler id with zeros to length 9.
-  kep_id = "%.9d" % int(kep_id)
+  kep_id = "{:09d}".format(int(kep_id))

   quarter_prefixes, cadence_suffix = ((LONG_CADENCE_QUARTER_PREFIXES, "llc")
                                       if long_cadence else
@@ -135,11 +135,10 @@ def kepler_filenames(base_dir,
   for quarter in quarters:
     for quarter_prefix in quarter_prefixes[quarter]:
       if injected_group:
-        base_name = "kplr%s-%s_INJECTED-%s_%s.fits" % (kep_id, quarter_prefix,
-                                                       injected_group,
-                                                       cadence_suffix)
+        base_name = "kplr{}-{}_INJECTED-{}_{}.fits".format(
+            kep_id, quarter_prefix, injected_group, cadence_suffix)
       else:
-        base_name = "kplr%s-%s_%s.fits" % (kep_id, quarter_prefix,
-                                           cadence_suffix)
+        base_name = "kplr{}-{}_{}.fits".format(kep_id, quarter_prefix,
+                                               cadence_suffix)
       filename = os.path.join(base_dir, base_name)
       # Not all stars have data for all quarters.
...
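
Similarly, `"%.9d" % int(kep_id)` and `"{:09d}".format(int(kep_id))` both zero-pad a Kepler ID to 9 digits, which is what the filename patterns in the tests below rely on. A short sketch using the Kepler ID from those tests:

```python
kep_id = "{:09d}".format(int("11442793"))
assert kep_id == "%.9d" % 11442793 == "011442793"

# Illustrative quarter prefix and cadence suffix; the real values come from
# the quarter prefix tables in kepler_io.
base_name = "kplr{}-{}_{}.fits".format(kep_id, "2009350155506", "llc")
assert base_name == "kplr011442793-2009350155506_llc.fits"
```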
@@ -122,15 +122,17 @@ class KeplerIoTest(absltest.TestCase):
     filenames = kepler_io.kepler_filenames(
         self.data_dir, 11442793, check_existence=True)
     expected_filenames = [
-        os.path.join(self.data_dir, "0114/011442793/kplr011442793-%s_llc.fits")
-        % q for q in ["2009350155506", "2010009091648", "2010174085026"]
+        os.path.join(self.data_dir,
+                     "0114/011442793/kplr011442793-{}_llc.fits".format(q))
+        for q in ["2009350155506", "2010009091648", "2010174085026"]
     ]
     self.assertItemsEqual(expected_filenames, filenames)

   def testReadKeplerLightCurve(self):
     filenames = [
-        os.path.join(self.data_dir, "0114/011442793/kplr011442793-%s_llc.fits")
-        % q for q in ["2009350155506", "2010009091648", "2010174085026"]
+        os.path.join(self.data_dir,
+                     "0114/011442793/kplr011442793-{}_llc.fits".format(q))
+        for q in ["2009350155506", "2010009091648", "2010174085026"]
     ]
     all_time, all_flux = kepler_io.read_kepler_light_curve(filenames)
     self.assertLen(all_time, 3)
@@ -148,8 +150,9 @@ class KeplerIoTest(absltest.TestCase):
   def testReadKeplerLightCurveScrambled(self):
     filenames = [
-        os.path.join(self.data_dir, "0114/011442793/kplr011442793-%s_llc.fits")
-        % q for q in ["2009350155506", "2010009091648", "2010174085026"]
+        os.path.join(self.data_dir,
+                     "0114/011442793/kplr011442793-{}_llc.fits".format(q))
+        for q in ["2009350155506", "2010009091648", "2010174085026"]
     ]
     all_time, all_flux = kepler_io.read_kepler_light_curve(
         filenames, scramble_type="SCR1")
@@ -170,8 +173,9 @@ class KeplerIoTest(absltest.TestCase):
   def testReadKeplerLightCurveScrambledInterpolateMissingTime(self):
     filenames = [
-        os.path.join(self.data_dir, "0114/011442793/kplr011442793-%s_llc.fits")
-        % q for q in ["2009350155506", "2010009091648", "2010174085026"]
+        os.path.join(self.data_dir,
+                     "0114/011442793/kplr011442793-{}_llc.fits".format(q))
+        for q in ["2009350155506", "2010009091648", "2010174085026"]
     ]
     all_time, all_flux = kepler_io.read_kepler_light_curve(
         filenames, scramble_type="SCR1", interpolate_missing_time=True)
...
@@ -51,35 +51,35 @@ def median_filter(x, y, num_bins, bin_width=None, x_min=None, x_max=None):
     ValueError: If an argument has an inappropriate value.
   """
   if num_bins < 2:
-    raise ValueError("num_bins must be at least 2. Got: %d" % num_bins)
+    raise ValueError("num_bins must be at least 2. Got: {}".format(num_bins))

   # Validate the lengths of x and y.
   x_len = len(x)
   if x_len < 2:
-    raise ValueError("len(x) must be at least 2. Got: %s" % x_len)
+    raise ValueError("len(x) must be at least 2. Got: {}".format(x_len))
   if x_len != len(y):
-    raise ValueError("len(x) (got: %d) must equal len(y) (got: %d)" % (x_len,
-                                                                       len(y)))
+    raise ValueError("len(x) (got: {}) must equal len(y) (got: {})".format(
+        x_len, len(y)))

   # Validate x_min and x_max.
   x_min = x_min if x_min is not None else x[0]
   x_max = x_max if x_max is not None else x[-1]
   if x_min >= x_max:
-    raise ValueError("x_min (got: %d) must be less than x_max (got: %d)" %
-                     (x_min, x_max))
+    raise ValueError("x_min (got: {}) must be less than x_max (got: {})".format(
+        x_min, x_max))
   if x_min > x[-1]:
     raise ValueError(
-        "x_min (got: %d) must be less than or equal to the largest value of x "
-        "(got: %d)" % (x_min, x[-1]))
+        "x_min (got: {}) must be less than or equal to the largest value of x "
+        "(got: {})".format(x_min, x[-1]))

   # Validate bin_width.
   bin_width = bin_width if bin_width is not None else (x_max - x_min) / num_bins
   if bin_width <= 0:
-    raise ValueError("bin_width must be positive. Got: %d" % bin_width)
+    raise ValueError("bin_width must be positive. Got: {}".format(bin_width))
   if bin_width >= x_max - x_min:
     raise ValueError(
-        "bin_width (got: %d) must be less than x_max - x_min (got: %d)" %
-        (bin_width, x_max - x_min))
+        "bin_width (got: {}) must be less than x_max - x_min (got: {})".format(
+            bin_width, x_max - x_min))

   bin_spacing = (x_max - x_min - bin_width) / (num_bins - 1)
...
@@ -287,8 +287,8 @@ def count_transit_points(time, event):
   # Tiny periods or erroneous time values could make this loop take forever.
   if (t_max - t_min) / event.period > 10**6:
     raise ValueError(
-        "Too many transits! Time range is [%.2f, %.2f] and period is %.2e." %
-        (t_min, t_max, event.period))
+        "Too many transits! Time range is [{:.4f}, {:.4f}] and period is "
+        "{:.4e}.".format(t_min, t_max, event.period))

   # Make sure t0 is in [t_min, t_min + period).
   t0 = np.mod(event.t0 - t_min, event.period) + t_min
...
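
Note that the hunk above changes more than the formatting style: the reported precision also increases from 2 digits (`%.2f`, `%.2e`) to 4 (`{:.4f}`, `{:.4e}`). The fixed-point and scientific-notation specs otherwise carry over directly between the two styles; a sketch with made-up time values:

```python
t_min, t_max, period = 131.512345, 1591.001234, 0.000123456  # illustrative

# Equivalent at the same precision; the commit simply asks for 4 digits now.
assert "%.4f" % t_min == "{:.4f}".format(t_min) == "131.5123"
assert "%.4e" % period == "{:.4e}".format(period) == "1.2346e-04"
```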
@@ -54,7 +54,8 @@ def kepler_spline(time, flux, bkspace=1.5, maxiter=5, outlier_cut=3):
   """
   if len(time) < 4:
     raise InsufficientPointsError(
-        "Cannot fit a spline on less than 4 points. Got %d points." % len(time))
+        "Cannot fit a spline on less than 4 points. Got {} points.".format(
+            len(time)))

   # Rescale time into [0, 1].
   t_min = np.min(time)
@@ -91,7 +92,7 @@ def kepler_spline(time, flux, bkspace=1.5, maxiter=5, outlier_cut=3):
       # and we consider this a fatal error.
       raise InsufficientPointsError(
           "Cannot fit a spline on less than 4 points. After removing "
-          "outliers, got %d points." % np.sum(mask))
+          "outliers, got {} points.".format(np.sum(mask)))

     try:
       with warnings.catch_warnings():
@@ -106,9 +107,9 @@ def kepler_spline(time, flux, bkspace=1.5, maxiter=5, outlier_cut=3):
       spline = curve.value(time)[0]
     except (IndexError, TypeError) as e:
       raise SplineError(
-          "Fitting spline failed with error: '%s'. This might be caused by the "
-          "breakpoint spacing being too small, and/or there being insufficient "
-          "points to fit the spline in one of the intervals." % e)
+          "Fitting spline failed with error: '{}'. This might be caused by the "
+          "breakpoint spacing being too small, and/or there being insufficient "
+          "points to fit the spline in one of the intervals.".format(e))

   return spline, mask
@@ -227,7 +228,7 @@ def choose_kepler_spline(all_time,
         # It's expected to get a SplineError occasionally for small values of
         # bkspace. Skip this bkspace.
         if verbose:
-          warnings.warn("Bad bkspace %.4f: %s" % (bkspace, e))
+          warnings.warn("Bad bkspace {}: {}".format(bkspace, e))
         metadata.bad_bkspaces.append(bkspace)
         bad_bkspace = True
         break
...