Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
yangql
googletest
Commits
47f819c3
Commit
47f819c3
authored
Aug 10, 2021
by
dmauro
Committed by
Derek Mauro
Aug 10, 2021
Browse files
Googletest export
Remove deprecated/unsupported scripts PiperOrigin-RevId: 389873391
parent
eb7e38df
Changes
10
Hide whitespace changes
Inline
Side-by-side
Showing
10 changed files
with
0 additions
and
3074 deletions
+0
-3074
googletest/scripts/README.md
googletest/scripts/README.md
+0
-5
googletest/scripts/common.py
googletest/scripts/common.py
+0
-83
googletest/scripts/fuse_gtest_files.py
googletest/scripts/fuse_gtest_files.py
+0
-253
googletest/scripts/gen_gtest_pred_impl.py
googletest/scripts/gen_gtest_pred_impl.py
+0
-730
googletest/scripts/gtest-config.in
googletest/scripts/gtest-config.in
+0
-274
googletest/scripts/release_docs.py
googletest/scripts/release_docs.py
+0
-158
googletest/scripts/run_with_path.py
googletest/scripts/run_with_path.py
+0
-32
googletest/scripts/test/Makefile
googletest/scripts/test/Makefile
+0
-59
googletest/scripts/upload.py
googletest/scripts/upload.py
+0
-1402
googletest/scripts/upload_gtest.py
googletest/scripts/upload_gtest.py
+0
-78
No files found.
googletest/scripts/README.md
deleted
100644 → 0
View file @
eb7e38df
# Please Note:
Files in this directory are no longer supported by the maintainers. They
represent mostly historical artifacts and are supported by the community only.
There is no guarantee whatsoever that these scripts still work.
googletest/scripts/common.py
deleted
100644 → 0
View file @
eb7e38df
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Shared utilities for writing scripts for Google Test/Mock."""
__author__
=
'wan@google.com (Zhanyong Wan)'
import
os
import
re
# Matches the line from 'svn info .' output that describes what SVN
# path the current local directory corresponds to. For example, in
# a googletest SVN workspace's trunk/test directory, the output will be:
#
# URL: https://googletest.googlecode.com/svn/trunk/test
#
# Group 1 captures the project name ('googletest' above); group 2 captures
# the path under the SVN root ('/trunk/test' above).
_SVN_INFO_URL_RE = re.compile(r'^URL: https://(\w+)\.googlecode\.com/svn(.*)')
def GetCommandOutput(command):
  """Runs the shell command and returns its stdout as a list of lines."""
  stream = os.popen(command, 'r')
  try:
    # Strip each line so callers get clean text with no trailing newlines.
    return [text.strip() for text in stream.readlines()]
  finally:
    stream.close()
def GetSvnInfo():
  """Returns the project name and the current SVN workspace's root path.

  Returns (None, None) when the current directory is not inside a
  recognized googlecode SVN checkout.
  """
  for info_line in GetCommandOutput('svn info .'):
    match = _SVN_INFO_URL_RE.match(info_line)
    if not match:
      continue
    project = match.group(1)  # googletest or googlemock
    rel_path = match.group(2)
    # Walk up one '..' per path component to reach the workspace root.
    root = os.path.realpath(rel_path.count('/') * '../')
    return project, root
  return None, None
def GetSvnTrunk():
  """Returns the current SVN workspace's trunk root path, or None."""
  _, root = GetSvnInfo()
  if root:
    return root + '/trunk'
  return None
def IsInGTestSvn():
  """Returns True when the current directory is inside a googletest checkout."""
  project, _ = GetSvnInfo()
  return 'googletest' == project
def IsInGMockSvn():
  """Returns True when the current directory is inside a googlemock checkout."""
  project, _ = GetSvnInfo()
  return 'googlemock' == project
googletest/scripts/fuse_gtest_files.py
deleted
100755 → 0
View file @
eb7e38df
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
https://github.com/google/googletest/blob/master/googletest/docs/advanced.md for
more information.
"""
__author__
=
'wan@google.com (Zhanyong Wan)'
import
os
import
re
try
:
from
sets
import
Set
as
set
# For Python 2.3 compatibility
except
ImportError
:
pass
import
sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')

# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')

# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')

# Where to find the source seed files (relative to the gtest root).
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'

# Where to put the generated files (relative to the output directory).
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
  """Verifies that the given file exists; aborts on failure.

  relative_path is the file path relative to the given directory.
  """
  full_path = os.path.join(directory, relative_path)
  if os.path.isfile(full_path):
    return
  print('ERROR: Cannot find %s in directory %s.' % (relative_path, directory))
  print('Please either specify a valid project root directory '
        'or omit it on the command line.')
  sys.exit(1)
def ValidateGTestRootDir(gtest_root):
  """Makes sure gtest_root points to a valid gtest root directory.

  The function aborts the program on failure.
  """
  # Both seed files must be present for the fusing to succeed.
  for seed in (GTEST_H_SEED, GTEST_ALL_CC_SEED):
    VerifyFileExists(gtest_root, seed)
def VerifyOutputFile(output_dir, relative_path):
  """Verifies that the given output file path is valid.

  relative_path is relative to the output_dir directory.  Prompts the
  user before clobbering an existing file, and creates the parent
  directory tree when it is missing.
  """
  # Makes sure the output file either doesn't exist or can be overwritten.
  target = os.path.join(output_dir, relative_path)
  if os.path.exists(target):
    # TODO(wan@google.com): The following user-interaction doesn't
    # work with automated processes.  We should provide a way for the
    # Makefile to force overwriting the files.
    print('%s already exists in directory %s - overwrite it? (y/N) '
          % (relative_path, output_dir))
    reply = sys.stdin.readline().strip()
    if reply not in ['y', 'Y']:
      print('ABORTED.')
      sys.exit(1)

  # Makes sure the directory holding the output file exists; creates
  # it and all its ancestors if necessary.
  parent = os.path.dirname(target)
  if not os.path.isdir(parent):
    os.makedirs(parent)
def ValidateOutputDir(output_dir):
  """Makes sure output_dir points to a valid output directory.

  The function aborts the program on failure.
  """
  # Check/prepare both generated-file destinations.
  for out in (GTEST_H_OUTPUT, GTEST_ALL_CC_OUTPUT):
    VerifyOutputFile(output_dir, out)
def FuseGTestH(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
  output_file = open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
  processed_files = set()  # Holds all gtest headers we've processed.

  def ProcessFile(gtest_header_path):
    """Processes the given gtest header file recursively, inlining its
    gtest #includes and copying everything else to output_file."""
    # We don't process the same header twice.
    if gtest_header_path in processed_files:
      return

    processed_files.add(gtest_header_path)

    # Reads each line in the given gtest header.
    for line in open(os.path.join(gtest_root, gtest_header_path), 'r'):
      m = INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        # It's '#include "gtest/..."' - let's process it recursively.
        ProcessFile('include/' + m.group(1))
      else:
        # Otherwise we copy the line unchanged to the output file.
        output_file.write(line)

  ProcessFile(GTEST_H_SEED)
  output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
  processed_files = set()  # Holds all gtest sources we've processed.

  def ProcessFile(gtest_source_file):
    """Processes the given gtest source file, rewriting its gtest/src
    #includes and copying everything else to output_file."""
    # We don't process the same #included file twice.
    if gtest_source_file in processed_files:
      return

    processed_files.add(gtest_source_file)

    # Reads each line in the given gtest source file.
    for line in open(os.path.join(gtest_root, gtest_source_file), 'r'):
      m = INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
          # It's '#include "gtest/gtest-spi.h"'.  This file is not
          # #included by "gtest/gtest.h", so we need to process it.
          ProcessFile(GTEST_SPI_H_SEED)
        else:
          # It's '#include "gtest/foo.h"' where foo is not gtest-spi.
          # We treat it as '#include "gtest/gtest.h"', as all other
          # gtest headers are being fused into gtest.h and cannot be
          # #included directly.

          # There is no need to #include "gtest/gtest.h" more than once.
          if not GTEST_H_SEED in processed_files:
            processed_files.add(GTEST_H_SEED)
            output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
      else:
        m = INCLUDE_SRC_FILE_REGEX.match(line)
        if m:
          # It's '#include "src/foo"' - let's process it recursively.
          ProcessFile(m.group(1))
        else:
          output_file.write(line)

  ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
  destination = os.path.join(output_dir, GTEST_ALL_CC_OUTPUT)
  output_file = open(destination, 'w')
  try:
    FuseGTestAllCcToFile(gtest_root, output_file)
  finally:
    output_file.close()
def FuseGTest(gtest_root, output_dir):
  """Fuses gtest.h and gtest-all.cc."""
  # Validate both ends before writing anything.
  ValidateGTestRootDir(gtest_root)
  ValidateOutputDir(output_dir)

  FuseGTestH(gtest_root, output_dir)
  FuseGTestAllCc(gtest_root, output_dir)
def main():
  """Parses the command line and dispatches to FuseGTest."""
  args = sys.argv
  if len(args) == 2:
    # fuse_gtest_files.py OUTPUT_DIR
    FuseGTest(DEFAULT_GTEST_ROOT_DIR, args[1])
  elif len(args) == 3:
    # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
    FuseGTest(args[1], args[2])
  else:
    print(__doc__)
    sys.exit(1)
# Run the fuser only when executed as a script, so the module can also
# be imported as a library.
if __name__ == '__main__':
  main()
googletest/scripts/gen_gtest_pred_impl.py
deleted
100755 → 0
View file @
eb7e38df
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__
=
'wan@google.com (Zhanyong Wan)'
import
os
import
sys
import
time
# Where this script is.
SCRIPT_DIR = os.path.dirname(sys.argv[0])

# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')

# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
  """Returns the preamble for the header file.

  Args:
    n:  the maximum arity of the predicate macros to be generated.
  """

  # A map that defines the values used in the preamble template.
  DEFS = {
      'today': time.strftime('%m/%d/%Y'),
      'year': time.strftime('%Y'),
      'command': '%s %s' % (os.path.basename(sys.argv[0]), n),
      'n': n
  }

  # The license header plus the body of the generated header; the two
  # literals are concatenated before the single %-substitution below.
  return (
      ("""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'.  DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros."""
       + """

#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GOOGLETEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_

#include "gtest/gtest.h"

namespace testing {

// This header implements a family of generic predicate assertion
// macros:
//
//   ASSERT_PRED_FORMAT1(pred_format, v1)
//   ASSERT_PRED_FORMAT2(pred_format, v1, v2)
//   ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult.  See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
//   ASSERT_PRED1(pred, v1)
//   ASSERT_PRED2(pred, v1, v2)
//   ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.

// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce.  Don't use this in your code.

#define GTEST_ASSERT_(expression, on_failure) \\
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
  if (const ::testing::AssertionResult gtest_ar = (expression)) \\
    ; \\
  else \\
    on_failure(gtest_ar.failure_message())

""") % DEFS)
def Arity(n):
  """Returns the English name of the given arity."""
  if n < 0:
    return None
  # Small arities have dedicated English names; the rest are formed
  # mechanically as 'N-ary'.
  small_names = ['nullary', 'unary', 'binary', 'ternary']
  if n <= 3:
    return small_names[n]
  return '%s-ary' % n
def Title(word):
  """Returns the given word in title case.  The difference between
  this and string's title() method is that Title('4-ary') is '4-ary'
  while '4-ary'.title() is '4-Ary'."""
  head, tail = word[0], word[1:]
  return head.upper() + tail
def OneTo(n):
  """Returns the list [1, 2, 3, ..., n]."""
  # 1-based inclusive range used throughout the generator.
  return range(1, n + 1)
def Iter(n, format, sep=''):
  """Given a positive integer n, a format string that contains 0 or
  more '%s' format specs, and optionally a separator string, returns
  the join of n strings, each formatted with the format string on an
  iterator ranged from 1 to n.

  Example:

  Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
  """
  # How many '%s' specs are in format?  Each one receives the same
  # iteration index i.
  spec_count = len(format.split('%s')) - 1
  pieces = [format % ((i,) * spec_count) for i in range(1, n + 1)]
  return sep.join(pieces)
def ImplementationForArity(n):
  """Returns the implementation of n-ary predicate assertions."""

  # A map the defines the values used in the implementation template.
  DEFS = {
      'n': str(n),
      'vs': Iter(n, 'v%s', sep=', '),
      'vts': Iter(n, '#v%s', sep=', '),
      'arity': Arity(n),
      'Arity': Title(Arity(n))
  }

  # The helper-function template; the per-argument pieces are appended
  # with Iter() so the output grows with the arity.
  impl = """

// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s.  Don't use
// this in your code.
template <typename Pred""" % DEFS

  impl += Iter(n, """,
          typename T%s""")

  impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS

  impl += Iter(n, """,
                                  const char* e%s""")

  impl += """,
                                  Pred pred"""

  impl += Iter(n, """,
                                  const T%s& v%s""")

  impl += """) {
  if (pred(%(vs)s)) return AssertionSuccess();

""" % DEFS

  impl += '  return AssertionFailure() << pred_text << "("'

  impl += Iter(n, """
                            << e%s""", sep=' << ", "')

  impl += ' << ") evaluates to false, where"'

  impl += Iter(
      n, """
      << "\\n" << e%s << " evaluates to " << ::testing::PrintToString(v%s)""")

  impl += """;
}

// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
  GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
                on_failure)

// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s.  Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
  GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS

  impl += Iter(n, """, \\
                                             #v%s""")

  impl += """, \\
                                             pred"""

  impl += Iter(n, """, \\
                                             v%s""")

  # The public EXPECT_*/ASSERT_* macros for this arity.
  impl += """), on_failure)

// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
  GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
  GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
  GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
  GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)

""" % DEFS

  return impl
def HeaderPostamble():
  """Returns the postamble for the header file."""

  return """

}  // namespace testing

#endif  // GOOGLETEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
  """Given a file path and a content string, overwrites the file with
  the given content, creating it if necessary.

  Args:
    path:    the file to (over)write.
    content: the full text to store in the file.
  """
  print('Updating file %s . . .' % path)
  # The original used the Python-2-only 'file()' builtin and the
  # 'print >> f, content,' statement; use open() + write() so the
  # script also runs under Python 3 (matching fuse_gtest_files.py).
  # The 'with' block guarantees the handle is closed even on error.
  with open(path, 'w+') as f:
    f.write(content)
  print('File %s has been updated.' % path)
def GenerateHeader(n):
  """Given the maximum arity n, updates the header file that implements
  the predicate assertions.
  """
  # Preamble, one implementation section per arity, then the postamble.
  sections = [HeaderPreamble(n)]
  sections.extend(ImplementationForArity(i) for i in OneTo(n))
  sections.append(HeaderPostamble())
  GenerateFile(HEADER, ''.join(sections))
def UnitTestPreamble():
  """Returns the preamble for the unit test file."""

  # A map that defines the values used in the preamble template.
  DEFS = {
      'today': time.strftime('%m/%d/%Y'),
      'year': time.strftime('%Y'),
      'command': '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
  }

  return (
      """// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'.  DO NOT EDIT BY HAND!

// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long.  If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions.  We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon.  In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.

#include <iostream>

#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"

// A user-defined data type.
struct Bool {
  explicit Bool(int val) : value(val != 0) {}

  bool operator>(int n) const { return value > Bool(n).value; }

  Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }

  bool operator==(const Bool& rhs) const { return value == rhs.value; }

  bool value;
};

// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
  return os << (x.value ? "true" : "false");
}

""" % DEFS)
def TestsForArity(n):
  """Returns the tests for n-ary predicate assertions."""

  # A map that defines the values used in the template for the tests.
  DEFS = {
      'n': n,
      'es': Iter(n, 'e%s', sep=', '),
      'vs': Iter(n, 'v%s', sep=', '),
      'vts': Iter(n, '#v%s', sep=', '),
      'tvs': Iter(n, 'T%s v%s', sep=', '),
      'int_vs': Iter(n, 'int v%s', sep=', '),
      'Bool_vs': Iter(n, 'Bool v%s', sep=', '),
      'types': Iter(n, 'typename T%s', sep=', '),
      'v_sum': Iter(n, 'v%s', sep=' + '),
      'arity': Arity(n),
      'Arity': Title(Arity(n)),
  }

  # Sample predicate functions used as the subject of the generated tests.
  tests = (
      """// Sample functions/functors for testing %(arity)s predicate assertions.

// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
  return %(v_sum)s > 0;
}

// The following two functions are needed because a compiler doesn't have
// a context yet to know which template function must be instantiated.
bool PredFunction%(n)sInt(%(int_vs)s) {
  return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
  return %(v_sum)s > 0;
}
""" % DEFS)

  tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
  template <%(types)s>
  bool operator()(""" % DEFS

  tests += Iter(n, 'const T%s& v%s', sep=""",
                  """)

  tests += """) {
    return %(v_sum)s > 0;
  }
};
""" % DEFS

  tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS

  tests += Iter(n, 'const char* e%s', sep=""",
                                             """)

  tests += Iter(n, """,
                                             const T%s& v%s""")

  tests += """) {
  if (PredFunction%(n)s(%(vs)s))
    return testing::AssertionSuccess();

  return testing::AssertionFailure()
      << """ % DEFS

  tests += Iter(n, 'e%s', sep=' << " + " << ')

  tests += """
      << " is expected to be positive, but evaluates to "
      << %(v_sum)s << ".";
}
""" % DEFS

  tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
  template <%(types)s>
  testing::AssertionResult operator()(""" % DEFS

  tests += Iter(n, 'const char* e%s', sep=""",
                                      """)

  tests += Iter(n, """,
                                      const T%s& v%s""")

  tests += """) const {
    return PredFormatFunction%(n)s(%(es)s, %(vs)s);
  }
};
""" % DEFS

  # The shared test fixture: SetUp() zeroes the evaluation counters and
  # TearDown() verifies each argument was evaluated exactly once.
  tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.

class Predicate%(n)sTest : public testing::Test {
 protected:
  void SetUp() override {
    expected_to_finish_ = true;
    finished_ = false;""" % DEFS

  tests += """
    """ + Iter(n, 'n%s_ = ') + """0;
  }
"""

  tests += """
  void TearDown() override {
    // Verifies that each of the predicate's arguments was evaluated
    // exactly once."""

  tests += ''.join(["""
    EXPECT_EQ(1, n%s_) <<
        "The predicate assertion didn't evaluate argument %s "
        "exactly once.";""" % (i, i + 1) for i in OneTo(n)])

  tests += """

    // Verifies that the control flow in the test function is expected.
    if (expected_to_finish_ && !finished_) {
      FAIL() << "The predicate assertion unexpactedly aborted the test.";
    } else if (!expected_to_finish_ && finished_) {
      FAIL() << "The failed predicate assertion didn't abort the test "
                "as expected.";
    }
  }

  // true if and only if the test function is expected to run to finish.
  static bool expected_to_finish_;

  // true if and only if the test function did run to finish.
  static bool finished_;
""" % DEFS

  tests += Iter(n, """
  static int n%s_;""")

  tests += """
};

bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS

  tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS

  tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS

  def GenTest(use_format, use_assert, expect_failure,
              use_functor, use_user_type):
    """Returns the test for a predicate assertion macro.

    Args:
      use_format:     true if and only if the assertion is a *_PRED_FORMAT*.
      use_assert:     true if and only if the assertion is a ASSERT_*.
      expect_failure: true if and only if the assertion is expected to fail.
      use_functor:    true if and only if the first argument of the assertion is
                      a functor (as opposed to a function)
      use_user_type:  true if and only if the predicate functor/function takes
                      argument(s) of a user-defined type.

    Example:

      GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
      of a successful EXPECT_PRED_FORMATn() that takes a functor
      whose arguments have built-in types."""

    if use_assert:
      assrt = 'ASSERT'  # 'assert' is reserved, so we cannot use
                        # that identifier here.
    else:
      assrt = 'EXPECT'

    assertion = assrt + '_PRED'

    if use_format:
      pred_format = 'PredFormat'
      assertion += '_FORMAT'
    else:
      pred_format = 'Pred'

    assertion += '%(n)s' % DEFS

    if use_functor:
      pred_format_type = 'functor'
      pred_format += 'Functor%(n)s()'
    else:
      pred_format_type = 'function'
      pred_format += 'Function%(n)s'
      if not use_format:
        if use_user_type:
          pred_format += 'Bool'
        else:
          pred_format += 'Int'

    test_name = pred_format_type.title()

    if use_user_type:
      arg_type = 'user-defined type (Bool)'
      test_name += 'OnUserType'
      if expect_failure:
        arg = 'Bool(n%s_++)'
      else:
        arg = 'Bool(++n%s_)'
    else:
      arg_type = 'built-in type (int)'
      test_name += 'OnBuiltInType'
      if expect_failure:
        arg = 'n%s_++'
      else:
        arg = '++n%s_'

    if expect_failure:
      successful_or_failed = 'failed'
      expected_or_not = 'expected.'
      test_name += 'Failure'
    else:
      successful_or_failed = 'successful'
      expected_or_not = 'UNEXPECTED!'
      test_name += 'Success'

    # A map that defines the values used in the test template.
    defs = DEFS.copy()
    defs.update({
        'assert': assrt,
        'assertion': assertion,
        'test_name': test_name,
        'pf_type': pred_format_type,
        'pf': pred_format,
        'arg_type': arg_type,
        'arg': arg,
        'successful': successful_or_failed,
        'expected': expected_or_not,
    })

    test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs

    indent = (len(assertion) + 3)*' '
    extra_indent = ''

    # Failing assertions are wrapped in EXPECT_{,NON}FATAL_FAILURE, which
    # adds one extra level of indentation to the generated body.
    if expect_failure:
      extra_indent = '  '
      if use_assert:
        test += """
  expected_to_finish_ = false;
  EXPECT_FATAL_FAILURE({  // NOLINT"""
      else:
        test += """
  EXPECT_NONFATAL_FAILURE({  // NOLINT"""

    test += '\n' + extra_indent + """  %(assertion)s(%(pf)s""" % defs

    # Resolve the '%(n)s' placeholders still embedded in pf/assertion.
    test = test % defs
    test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
    test += ');\n' + extra_indent + '  finished_ = true;\n'

    if expect_failure:
      test += '  }, "");\n'

    test += '}\n'
    return test

  # Generates tests for all 2**5 = 32 combinations of the five flags.
  tests += ''.join([GenTest(use_format, use_assert, expect_failure,
                            use_functor, use_user_type)
                    for use_format in [0, 1]
                    for use_assert in [0, 1]
                    for expect_failure in [0, 1]
                    for use_functor in [0, 1]
                    for use_user_type in [0, 1]])

  return tests
def UnitTestPostamble():
  """Returns the text emitted after all generated tests (currently none)."""
  # Kept as an explicit hook for symmetry with UnitTestPreamble(); nothing
  # follows the generated tests today.
  postamble = ''
  return postamble
def GenerateUnitTest(n):
  """Writes the unit test for up-to n-ary predicate assertions.

  Emits the UNIT_TEST file via GenerateFile(); the content is the test
  preamble, one generated test section per arity in 1..n, and the postamble.
  Returns nothing.
  """
  # Each TestsForArity(i) expands to the tests for i-ary predicates; OneTo(n)
  # presumably yields 1..n inclusive — defined earlier in this script.
  GenerateFile(UNIT_TEST,
               UnitTestPreamble()
               + ''.join([TestsForArity(i) for i in OneTo(n)])
               + UnitTestPostamble())
def _Main():
  """The entry point of the script.

  Expects exactly one command-line argument: the maximum predicate arity n.
  Generates the predicate-assertion header file and its unit test, or prints
  the usage text and exits with status 1 when the arguments are wrong.
  """
  if len(sys.argv) != 2:
    # Use the single-argument print() call form, which behaves identically
    # under Python 2 and Python 3, instead of the Python-2-only bare
    # `print` statement the original used.
    print(__doc__)
    print('Author: ' + __author__)
    sys.exit(1)

  # May raise ValueError on a non-integer argument; that traceback is an
  # acceptable diagnostic for a developer-only generator script.
  n = int(sys.argv[1])
  GenerateHeader(n)
  GenerateUnitTest(n)


if __name__ == '__main__':
  _Main()
googletest/scripts/gtest-config.in — deleted (file mode 100755 → 0). View file @ eb7e38df
#!/bin/sh
# These variables are automatically filled in by the configure script.
name
=
"@PACKAGE_TARNAME@"
version
=
"@PACKAGE_VERSION@"
# Print the one-line usage summary to stdout.
show_usage()
{
  printf '%s\n' "Usage: gtest-config [OPTIONS...]"
}
# Print the usage line followed by the full, verbatim help text.
# The heredoc delimiter is quoted (<<\EOF) so no parameter/command expansion
# happens inside the help text — the $(...) examples are printed literally.
show_help()
{
  show_usage
  cat <<\EOF

The `gtest-config' script provides access to the necessary compile and linking
flags to connect with Google C++ Testing Framework, both in a build prior to
installation, and on the system proper after installation. The installation
overrides may be issued in combination with any other queries, but will only
affect installation queries if called on a built but not installed gtest. The
installation queries may not be issued with any other types of queries, and
only one installation query may be made at a time. The version queries and
compiler flag queries may be combined as desired but not mixed. Different
version queries are always combined with logical "and" semantics, and only the
last of any particular query is used while all previous ones ignored. All
versions must be specified as a sequence of numbers separated by periods.
Compiler flag queries output the union of the sets of flags when combined.

 Examples:
  gtest-config --min-version=1.0 || echo "Insufficient Google Test version."

  g++ $(gtest-config --cppflags --cxxflags) -o foo.o -c foo.cpp
  g++ $(gtest-config --ldflags --libs) -o foo foo.o

  # When using a built but not installed Google Test:
  g++ $(../../my_gtest_build/scripts/gtest-config ...) ...

  # When using an installed Google Test, but with installation overrides:
  export GTEST_PREFIX="/opt"
  g++ $(gtest-config --libdir="/opt/lib64" ...) ...

 Help:
  --usage                    brief usage information
  --help                     display this help message

 Installation Overrides:
  --prefix=<dir>             overrides the installation prefix
  --exec-prefix=<dir>        overrides the executable installation prefix
  --libdir=<dir>             overrides the library installation prefix
  --includedir=<dir>         overrides the header file installation prefix

 Installation Queries:
  --prefix                   installation prefix
  --exec-prefix              executable installation prefix
  --libdir                   library installation directory
  --includedir               header file installation directory
  --version                  the version of the Google Test installation

 Version Queries:
  --min-version=VERSION      return 0 if the version is at least VERSION
  --exact-version=VERSION    return 0 if the version is exactly VERSION
  --max-version=VERSION      return 0 if the version is at most VERSION

 Compilation Flag Queries:
  --cppflags                 compile flags specific to the C-like preprocessors
  --cxxflags                 compile flags appropriate for C++ programs
  --ldflags                  linker flags
  --libs                     libraries for linking
EOF
}
# This function bounds our version with a min and a max. It uses some clever
# POSIX-compliant variable expansion to portably do all the work in the shell
# and avoid any dependency on a particular "sed" or "awk" implementation.
# Notable is that it will only ever compare the first 3 components of versions.
# Further components will be cleanly stripped off. All versions must be
# unadorned, so "v1.0" will *not* work. The minimum version must be in $1, and
# the max in $2. TODO(chandlerc@google.com): If this ever breaks, we should
# investigate expanding this via autom4te from AS_VERSION_COMPARE rather than
# continuing to maintain our own shell version.
# Compare the script's own $version against a [min, max] range and exit the
# whole script: status 0 if min <= version <= max, status 1 otherwise.
#   $1 - minimum acceptable version (unadorned "X[.Y[.Z]]")
#   $2 - maximum acceptable version (unadorned "X[.Y[.Z]]")
# Only the first three dotted components are compared; missing minor/point
# components default to 0. Parsing uses POSIX ${var%%.*} / ${var#*.} parameter
# expansion only, so no external sed/awk dependency is needed.
check_versions()
{
  # Split $version into major/minor/point. The "${version#*.}" != "${version}"
  # test detects whether a '.' exists at all before stripping components.
  major_version=${version%%.*}
  minor_version="0"
  point_version="0"
  if test "${version#*.}" != "${version}"; then
    minor_version=${version#*.}
    minor_version=${minor_version%%.*}
  fi
  if test "${version#*.*.}" != "${version}"; then
    point_version=${version#*.*.}
    point_version=${point_version%%.*}
  fi

  # Split the minimum version the same way.
  min_version="$1"
  min_major_version=${min_version%%.*}
  min_minor_version="0"
  min_point_version="0"
  if test "${min_version#*.}" != "${min_version}"; then
    min_minor_version=${min_version#*.}
    min_minor_version=${min_minor_version%%.*}
  fi
  if test "${min_version#*.*.}" != "${min_version}"; then
    min_point_version=${min_version#*.*.}
    min_point_version=${min_point_version%%.*}
  fi

  # Split the maximum version the same way.
  max_version="$2"
  max_major_version=${max_version%%.*}
  max_minor_version="0"
  max_point_version="0"
  if test "${max_version#*.}" != "${max_version}"; then
    max_minor_version=${max_version#*.}
    max_minor_version=${max_minor_version%%.*}
  fi
  if test "${max_version#*.*.}" != "${max_version}"; then
    max_point_version=${max_version#*.*.}
    max_point_version=${max_point_version%%.*}
  fi

  # Lexicographic compare against the minimum: fail fast on major, then
  # minor, then point. $((...)) forces numeric (not string) comparison.
  test $(($major_version)) -lt $(($min_major_version)) && exit 1
  if test $(($major_version)) -eq $(($min_major_version)); then
    test $(($minor_version)) -lt $(($min_minor_version)) && exit 1
    if test $(($minor_version)) -eq $(($min_minor_version)); then
      test $(($point_version)) -lt $(($min_point_version)) && exit 1
    fi
  fi

  # Same comparison against the maximum, with the direction reversed.
  test $(($major_version)) -gt $(($max_major_version)) && exit 1
  if test $(($major_version)) -eq $(($max_major_version)); then
    test $(($minor_version)) -gt $(($max_minor_version)) && exit 1
    if test $(($minor_version)) -eq $(($max_minor_version)); then
      test $(($point_version)) -gt $(($max_point_version)) && exit 1
    fi
  fi

  # In range: terminate the script successfully.
  exit 0
}
# ---- Main script flow -----------------------------------------------------

# Show the usage line when no arguments are specified.
if test $# -eq 0; then
  show_usage
  exit 1
fi

# Parse all command-line options. Later occurrences of the same option
# overwrite earlier ones; flag options accumulate.
while test $# -gt 0; do
  case $1 in
    --usage)          show_usage; exit 0;;
    --help)           show_help; exit 0;;

    # Installation overrides
    --prefix=*)       GTEST_PREFIX=${1#--prefix=};;
    --exec-prefix=*)  GTEST_EXEC_PREFIX=${1#--exec-prefix=};;
    --libdir=*)       GTEST_LIBDIR=${1#--libdir=};;
    --includedir=*)   GTEST_INCLUDEDIR=${1#--includedir=};;

    # Installation queries — only one query may be issued per invocation.
    --prefix|--exec-prefix|--libdir|--includedir|--version)
      if test -n "${do_query}"; then
        show_usage
        exit 1
      fi
      do_query=${1#--}
      ;;

    # Version checking
    --min-version=*)  do_check_versions=yes
                      min_version=${1#--min-version=}
                      ;;
    --max-version=*)  do_check_versions=yes
                      max_version=${1#--max-version=}
                      ;;
    --exact-version=*) do_check_versions=yes
                       exact_version=${1#--exact-version=}
                       ;;

    # Compiler flag output
    --cppflags)       echo_cppflags=yes;;
    --cxxflags)       echo_cxxflags=yes;;
    --ldflags)        echo_ldflags=yes;;
    --libs)           echo_libs=yes;;

    # Everything else is an error
    *)                show_usage; exit 1;;
  esac
  shift
done

# These have defaults filled in by the configure script but can also be
# overridden by environment variables or command line parameters.
prefix="${GTEST_PREFIX:-@prefix@}"
exec_prefix="${GTEST_EXEC_PREFIX:-@exec_prefix@}"
libdir="${GTEST_LIBDIR:-@libdir@}"
includedir="${GTEST_INCLUDEDIR:-@includedir@}"

# We try and detect if our binary is not located at its installed location. If
# it's not, we provide variables pointing to the source and build tree rather
# than to the install tree. This allows building against a just-built gtest
# rather than an installed gtest.
bindir="@bindir@"
this_relative_bindir=`dirname $0`
this_bindir=`cd ${this_relative_bindir}; pwd -P`
if test "${this_bindir}" = "${this_bindir%${bindir}}"; then
  # The path to the script doesn't end in the bindir sequence from Autoconf,
  # assume that we are in a build tree.
  build_dir=`dirname ${this_bindir}`
  src_dir=`cd ${this_bindir}; cd @top_srcdir@; pwd -P`

  # TODO(chandlerc@google.com): This is a dangerous dependency on libtool, we
  # should work to remove it, and/or remove libtool altogether, replacing it
  # with direct references to the library and a link path.
  gtest_libs="${build_dir}/lib/libgtest.la @PTHREAD_CFLAGS@ @PTHREAD_LIBS@"
  gtest_ldflags=""

  # We provide hooks to include from either the source or build dir, where the
  # build dir is always preferred. This will potentially allow us to write
  # build rules for generated headers and have them automatically be preferred
  # over provided versions.
  gtest_cppflags="-I${build_dir}/include -I${src_dir}/include"
  gtest_cxxflags="@PTHREAD_CFLAGS@"
else
  # We're using an installed gtest, although it may be staged under some
  # prefix. Assume (as our own libraries do) that we can resolve the prefix,
  # and are present in the dynamic link paths.
  gtest_ldflags="-L${libdir}"
  gtest_libs="-l${name} @PTHREAD_CFLAGS@ @PTHREAD_LIBS@"
  gtest_cppflags="-I${includedir}"
  gtest_cxxflags="@PTHREAD_CFLAGS@"
fi

# Do an installation query if requested.
if test -n "$do_query"; then
  case $do_query in
    prefix)      echo $prefix; exit 0;;
    exec-prefix) echo $exec_prefix; exit 0;;
    libdir)      echo $libdir; exit 0;;
    includedir)  echo $includedir; exit 0;;
    version)     echo $version; exit 0;;
    *)           show_usage; exit 1;;
  esac
fi

# Do a version check if requested. check_versions always exits the script,
# so the code after each call is unreachable.
if test "$do_check_versions" = "yes"; then
  # Make sure we didn't receive a bad combination of parameters.
  test "$echo_cppflags" = "yes" && show_usage && exit 1
  test "$echo_cxxflags" = "yes" && show_usage && exit 1
  test "$echo_ldflags" = "yes" && show_usage && exit 1
  test "$echo_libs" = "yes" && show_usage && exit 1

  if test "$exact_version" != ""; then
    check_versions $exact_version $exact_version
    # unreachable
  else
    check_versions ${min_version:-0.0.0} ${max_version:-9999.9999.9999}
    # unreachable
  fi
fi

# Do the output in the correct order so that these can be used in-line of
# a compiler invocation.
output=""
test "$echo_cppflags" = "yes" && output="$output $gtest_cppflags"
test "$echo_cxxflags" = "yes" && output="$output $gtest_cxxflags"
test "$echo_ldflags" = "yes" && output="$output $gtest_ldflags"
test "$echo_libs" = "yes" && output="$output $gtest_libs"
echo $output

exit 0
googletest/scripts/release_docs.py — deleted (file mode 100755 → 0). View file @ eb7e38df
#!/usr/bin/env python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for branching Google Test/Mock wiki pages for a new version.
SYNOPSIS
release_docs.py NEW_RELEASE_VERSION
Google Test and Google Mock's external user documentation is in
interlinked wiki files. When we release a new version of
Google Test or Google Mock, we need to branch the wiki files
such that users of a specific version of Google Test/Mock can
look up documentation relevant for that version. This script
automates that process by:
- branching the current wiki pages (which document the
behavior of the SVN trunk head) to pages for the specified
version (e.g. branching FAQ.wiki to V2_6_FAQ.wiki when
NEW_RELEASE_VERSION is 2.6);
- updating the links in the branched files to point to the branched
version (e.g. a link in V2_6_FAQ.wiki that pointed to
Primer.wiki#Anchor will now point to V2_6_Primer.wiki#Anchor).
NOTE: NEW_RELEASE_VERSION must be a NEW version number for
which the wiki pages don't yet exist; otherwise you'll get SVN
errors like "svn: Path 'V1_7_PumpManual.wiki' is not a
directory" when running the script.
EXAMPLE
$ cd PATH/TO/GTEST_SVN_WORKSPACE/trunk
$ scripts/release_docs.py 2.6 # create wiki pages for v2.6
$ svn status # verify the file list
$ svn diff # verify the file contents
$ svn commit -m "release wiki pages for v2.6"
"""
__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import re
import sys

# Project-local helper module (googletest/scripts/common.py); provides
# GetSvnInfo() used below.
import common


# Wiki pages that shouldn't be branched for every gtest/gmock release.
GTEST_UNVERSIONED_WIKIS = ['DevGuide.wiki']
GMOCK_UNVERSIONED_WIKIS = [
    'DesignDoc.wiki',
    'DevGuide.wiki',
    'KnownIssues.wiki'
]
def DropWikiSuffix(wiki_filename):
  """Strips a trailing '.wiki' extension from the given filename, if any."""
  suffix = '.wiki'
  if wiki_filename.endswith(suffix):
    return wiki_filename[:-len(suffix)]
  return wiki_filename
class WikiBrancher(object):
  """Branches the project's wiki pages for a release via `svn cp` and
  rewrites intra-wiki links in the branched copies to point at the
  versioned page names.

  NOTE: Python 2 only (uses the `print` statement and the `file()` builtin).
  """

  def __init__(self, dot_version):
    # dot_version: release version as a dotted string, e.g. '2.6'.
    # GetSvnInfo() returns (project name, SVN root path) — defined in common.py.
    self.project, svn_root_path = common.GetSvnInfo()
    if self.project not in ('googletest', 'googlemock'):
      sys.exit('This script must be run in a gtest or gmock SVN workspace.')
    self.wiki_dir = svn_root_path + '/wiki'
    # Turn '2.6' to 'V2_6_'.
    self.version_prefix = 'V' + dot_version.replace('.', '_') + '_'
    self.files_to_branch = self.GetFilesToBranch()
    page_names = [DropWikiSuffix(f) for f in self.files_to_branch]
    # A link to Foo.wiki is in one of the following forms:
    #   [Foo words]
    #   [Foo#Anchor words]
    #   [http://code.google.com/.../wiki/Foo words]
    #   [http://code.google.com/.../wiki/Foo#Anchor words]
    # We want to replace 'Foo' with 'V2_6_Foo' in the above cases.
    self.search_for_re = re.compile(
        # This regex matches either
        #   [Foo
        # or
        #   /wiki/Foo
        # followed by a space or a #, where Foo is the name of an
        # unversioned wiki page.
        r'(\[|/wiki/)(%s)([ #])' % '|'.join(page_names))
    # Re-insert the matched bracket/prefix, then the version prefix, the page
    # name, and the trailing delimiter.
    self.replace_with = r'\1%s\2\3' % (self.version_prefix,)

  def GetFilesToBranch(self):
    """Returns a list of .wiki file names that need to be branched."""
    unversioned_wikis = (GTEST_UNVERSIONED_WIKIS
                         if self.project == 'googletest'
                         else GMOCK_UNVERSIONED_WIKIS)
    return [f for f in os.listdir(self.wiki_dir)
            if (f.endswith('.wiki') and
                # Excluded versioned .wiki files.
                not re.match(r'^V\d', f) and
                f not in unversioned_wikis)]

  def BranchFiles(self):
    """Branches the .wiki files needed to be branched."""
    print 'Branching %d .wiki files:' % (len(self.files_to_branch),)
    os.chdir(self.wiki_dir)
    for f in self.files_to_branch:
      # Copy Foo.wiki to e.g. V2_6_Foo.wiki inside the SVN workspace.
      command = 'svn cp %s %s%s' % (f, self.version_prefix, f)
      print command
      os.system(command)

  def UpdateLinksInBranchedFiles(self):
    """Rewrites links in each branched copy to the versioned page names.

    Reads the unversioned source page, applies the substitution regex built
    in __init__, and overwrites the branched (versioned) page with the
    result.
    """
    for f in self.files_to_branch:
      source_file = os.path.join(self.wiki_dir, f)
      versioned_file = os.path.join(self.wiki_dir, self.version_prefix + f)
      print 'Updating links in %s.' % (versioned_file,)
      text = file(source_file, 'r').read()
      new_text = self.search_for_re.sub(self.replace_with, text)
      file(versioned_file, 'w').write(new_text)
def main():
  """Entry point: branches the wiki pages for the version given in argv[1].

  Exits with the module docstring as the message when the argument count is
  wrong.
  """
  if len(sys.argv) != 2:
    sys.exit(__doc__)

  # sys.argv[1] is the new release version, e.g. '2.6'.
  brancher = WikiBrancher(sys.argv[1])
  brancher.BranchFiles()
  brancher.UpdateLinksInBranchedFiles()


if __name__ == '__main__':
  main()
googletest/scripts/run_with_path.py — deleted (file mode 100755 → 0). View file @ eb7e38df
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Runs program specified in the command line with the substituted PATH.
This script is needed for to support building under Pulse which is unable
to override the existing PATH variable.
"""
import
os
import
subprocess
import
sys
# Name of the environment variable whose value, when present, replaces PATH
# before the child command is launched.
SUBST_PATH_ENV_VAR_NAME = "SUBST_PATH"
def main():
  """Optionally substitutes PATH from $SUBST_PATH, then runs the command
  given on the command line and exits with its (portable) exit status."""
  override = os.environ.get(SUBST_PATH_ENV_VAR_NAME)
  if override is not None:
    os.environ["PATH"] = override

  child = subprocess.Popen(sys.argv[1:])
  status = child.wait()

  # A negative status (-signal) means the child was terminated by a signal.
  # Returning a negative exit code is not portable, so report 100 instead.
  sys.exit(100 if status < 0 else status)

if __name__ == "__main__":
  main()
googletest/scripts/test/Makefile — deleted (file mode 100644 → 0). View file @ eb7e38df
# A Makefile for fusing Google Test and building a sample test against it.
#
# SYNOPSIS:
#
#   make [all]  - makes everything.
#   make TARGET - makes the given target.
#   make check  - makes everything and runs the built sample test.
#   make clean  - removes all files generated by make.

# Points to the root of fused Google Test, relative to where this file is.
FUSED_GTEST_DIR = output

# Paths to the fused gtest files.
FUSED_GTEST_H = $(FUSED_GTEST_DIR)/gtest/gtest.h
FUSED_GTEST_ALL_CC = $(FUSED_GTEST_DIR)/gtest/gtest-all.cc

# Where to find the sample test.
SAMPLE_DIR = ../../samples

# Where to find gtest_main.cc.
GTEST_MAIN_CC = ../../src/gtest_main.cc

# Flags passed to the preprocessor.
# We have no idea here whether pthreads is available in the system, so
# disable its use.
CPPFLAGS += -I$(FUSED_GTEST_DIR) -DGTEST_HAS_PTHREAD=0

# Flags passed to the C++ compiler.
CXXFLAGS += -g

all : sample1_unittest

check : all
	./sample1_unittest

clean :
	rm -rf $(FUSED_GTEST_DIR) sample1_unittest *.o

# The fused header/source are (re)generated by the sibling fuser script.
$(FUSED_GTEST_H) :
	../fuse_gtest_files.py $(FUSED_GTEST_DIR)

$(FUSED_GTEST_ALL_CC) :
	../fuse_gtest_files.py $(FUSED_GTEST_DIR)

gtest-all.o : $(FUSED_GTEST_H) $(FUSED_GTEST_ALL_CC)
	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(FUSED_GTEST_DIR)/gtest/gtest-all.cc

gtest_main.o : $(FUSED_GTEST_H) $(GTEST_MAIN_CC)
	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(GTEST_MAIN_CC)

sample1.o : $(SAMPLE_DIR)/sample1.cc $(SAMPLE_DIR)/sample1.h
	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(SAMPLE_DIR)/sample1.cc

sample1_unittest.o : $(SAMPLE_DIR)/sample1_unittest.cc \
                     $(SAMPLE_DIR)/sample1.h $(FUSED_GTEST_H)
	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(SAMPLE_DIR)/sample1_unittest.cc

sample1_unittest : sample1.o sample1_unittest.o gtest-all.o gtest_main.o
	$(CXX) $(CPPFLAGS) $(CXXFLAGS) $^ -o $@
googletest/scripts/upload.py — deleted (file mode 100755 → 0). View file @ eb7e38df
#!/usr/bin/env python
#
# Copyright 2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import
cookielib
import
getpass
import
logging
import
md5
import
mimetypes
import
optparse
import
os
import
re
import
socket
import
subprocess
import
sys
import
urllib
import
urllib2
import
urlparse
try
:
import
readline
except
ImportError
:
pass
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1

# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a suggestion
  to the user. If the user presses enter without typing in anything the last
  used email address is used. If the user enters a new address, it is saved
  for next time we prompt.

  NOTE: Python 2 only (`except IOError, e` syntax and `raw_input`).
  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      last_email_file = open(last_email_file_name, "r")
      last_email = last_email_file.readline().strip("\n")
      last_email_file.close()
      # Show the remembered address as the default in the prompt.
      prompt += " [%s]" % last_email
    except IOError, e:
      # Best effort: an unreadable cache file just means no suggestion.
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      last_email_file = open(last_email_file_name, "w")
      last_email_file.write(email)
      last_email_file.close()
    except IOError, e:
      # Best effort: failing to persist the address is not fatal.
      pass
  else:
    # Empty input means "accept the remembered default".
    email = last_email
  return email
def StatusUpdate(msg):
  """Print a status message to stdout.

  If 'verbosity' is greater than 0, print the message.

  Args:
    msg: The string to print.
  """
  # 'verbosity' is the module-level setting defined near the top of the file.
  if verbosity > 0:
    print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit with status 1.

  NOTE: Python 2 only (`print >>` redirection syntax).
  """
  print >>sys.stderr, msg
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""

  def __init__(self, url, code, msg, headers, args):
    # args is the parsed key/value body of the ClientLogin error response;
    # its "Error" entry carries the failure reason (e.g. "BadAuthentication").
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    self.args = args
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server.

  NOTE: Python 2 only (urllib/urllib2, dict.iteritems, `except E, e`,
  `print >>`).
  """

  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    # Subclasses supply the concrete opener via _GetOpener().
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)

  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()

  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request with the Host override and extra headers
    applied."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req

  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email:    The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # The response body is newline-separated "key=value" pairs.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # 403 carries a key=value body describing the login failure; wrap it
        # in ClientLoginError so callers can inspect e.reason.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise

  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener ignores redirects, so the expected success case surfaces
      # as an HTTPError carrying the 302 response.
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True

  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see https://developers.google.com/identity/protocols/AuthForInstalledApps).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # Up to three attempts; only "BadAuthentication" re-prompts, every other
    # ClientLogin failure prints a diagnostic and stops.
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human.  Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      self._GetAuthCookie(auth_token)
      return

  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication.  Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()

    # The timeout is applied process-wide via the socket module default and
    # restored afterwards.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401:
            # Cookie expired or missing: re-authenticate and retry.
            self._Authenticate()
##           elif e.code >= 500 and e.code < 600:
##             # Server Error - try again.
##             continue
          else:
            raise
    finally:
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests.

  Concrete AbstractRpcServer that persists authentication cookies to
  ~/.codereview_upload_cookies when save_cookies is set.
  """

  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()

  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Returns:
      A urllib2.OpenerDirector object.
    """
    # Build the opener by hand (no HTTPRedirectHandler) so that redirects
    # are surfaced to the caller instead of being followed.
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # A loadable cookie jar means we may already be logged in.
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line interface definition.  Options are grouped by the part of the
# upload workflow they affect: logging, review server, issue metadata, and
# patch generation/upload.
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """

  rpc_server_class = HttpRpcServer

  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)

  # If this is the dev_appserver, use fake authentication.
  host = (options.host or options.server).lower()
  if host == "localhost" or host.startswith("localhost:"):
    email = options.email
    if email is None:
      email = "test@example.com"
      logging.info("Using debug user %s. Override with --email" % email)
    server = rpc_server_class(
        options.server,
        # The dev_appserver accepts any password, so a dummy one is used.
        lambda: (email, "password"),
        host_override=options.host,
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=options.save_cookies)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server

  # Production server: credentials are requested lazily, on the first RPC
  # that needs authentication.
  return rpc_server_class(options.server, GetUserCredentials,
                          host_override=options.host,
                          save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.

  Returns:
    (content_type, body) ready for httplib.HTTP instance.

  Source:
    https://web.archive.org/web/20160116052001/code.activestate.com/recipes/146306
  """
  boundary = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  delimiter = '--' + boundary
  parts = []
  # Plain form fields: header line, blank separator, then the value.
  for (name, value) in fields:
    parts.extend([
        delimiter,
        'Content-Disposition: form-data; name="%s"' % name,
        '',
        value,
    ])
  # File fields additionally carry a filename and a guessed content type.
  for (name, filename, value) in files:
    parts.extend([
        delimiter,
        'Content-Disposition: form-data; name="%s"; filename="%s"' %
        (name, filename),
        'Content-Type: %s' % GetContentType(filename),
        '',
        value,
    ])
  # Closing delimiter plus a trailing CRLF terminate the body.
  parts.extend([delimiter + '--', ''])
  return ('multipart/form-data; boundary=%s' % boundary,
          '\r\n'.join(parts))
def GetContentType(filename):
  """Helper to guess the content-type from the filename."""
  guessed, _ = mimetypes.guess_type(filename)
  if guessed:
    return guessed
  # Unknown extensions fall back to the generic binary type.
  return 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")

def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE,
                       shell=use_shell,
                       universal_newlines=universal_newlines)
  if print_output:
    # Echo stdout line by line while still accumulating it for the caller.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  # NOTE(review): stderr is only drained after wait(); a child that fills the
  # stderr pipe buffer could block — confirm acceptable for the short
  # svn/git/hg invocations this script makes.
  errout = p.stderr.read()
  if print_output and errout:
    print >> sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False):
  """Runs a command and returns its stdout, exiting the program on failure.

  Args:
    command: Command to execute (passed to RunShellWithReturnCode).
    silent_ok: If False, empty output is treated as a fatal error.
    universal_newlines: Forwarded to RunShellWithReturnCode.
    print_output: If True, the command's output is echoed while it runs.

  Returns:
    The command's stdout as a string.
  """
  output, exit_code = RunShellWithReturnCode(command, print_output,
                                             universal_newlines)
  if exit_code:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not (silent_ok or output):
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""

  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options

  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      prompt = "Are you sure to continue?(y/N) "
      answer = raw_input(prompt).strip()
      if answer != "y":
        ErrorExit("User aborted")

  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty. For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple. Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files

  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""

    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server."""
      file_too_large = False
      # NOTE(review): "type" shadows the builtin here; kept as-is since this
      # block only documents the original behavior.
      if is_base:
        type = "base"
      else:
        type = "current"
      if len(content) > MAX_UPLOAD_SIZE:
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      # The checksum lets the server skip re-uploading unchanged content.
      checksum = md5.new(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body, content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate(" --> %s" % response_body)
        sys.exit(1)

    # Build a mapping of patch key string -> filename; the list comprehension
    # is used purely for its setdefault side effect.
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      # A "nobase" marker in the key means the server doesn't want the base
      # file; only the trailing numeric id is kept.
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)

  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""

  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      # Accept "REV" or "REV:REV" forms for --rev.
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
    self.svnls_cache = {}
    # SVN base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)

  def GuessBase(self, required):
    """Wrapper for _GuessBase."""
    return self.svn_base

  def _GuessBase(self, required):
    """Returns the SVN base URL.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        # Known hosting sites get special-cased viewer URLs; anything else
        # just reuses the repository URL itself as the base.
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            if path.startswith("/projects/"):
              path = path[9:]
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          if path.startswith("/repos/"):
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None

  def GenerateDiff(self, args):
    """Returns "svn diff" output, exiting if it contains no patches."""
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data

  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (https://reviews.reviewboard.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
        # Standard keywords
        'Date': ['Date', 'LastChangedDate'],
        'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
        'Author': ['Author', 'LastChangedBy'],
        'HeadURL': ['HeadURL', 'URL'],
        'Id': ['Id'],

        # Aliases
        'LastChangedDate': ['LastChangedDate', 'Date'],
        'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
        'LastChangedBy': ['LastChangedBy', 'Author'],
        'URL': ['URL', 'HeadURL'],
    }

    def repl(m):
      # Preserve the expanded width when the "::" fixed-length form is used.
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)

    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords),
                  repl, content)

  def GetUnknownFiles(self):
    """Returns the "?" lines from "svn status" (untracked files)."""
    status = RunShell(["svn", "status", "--ignore-externals"],
                      silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files

  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result

  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # https://web.archive.org/web/20090918234815/svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # Present only at start rev -> deleted; at both -> modified;
      # only at end rev -> added.
      if relfilename in old_files and relfilename not in new_files:
        status = "D "
      elif relfilename in old_files and relfilename in new_files:
        status = "M "
      else:
        status = "A "
    return status

  def GetBaseFile(self, filename):
    status = self.GetStatus(filename)
    base_content = None
    new_content = None

    # If a file is copied its status will be "A +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = mimetype and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = mimetype and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True,
                                     silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True

      if get_base:
        # Binary content must not have its newlines translated.
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> hash of base file.
    self.base_hashes = {}

  def GenerateDiff(self, extra_args):
    """Returns "git diff" output rewritten into svn-style diff format."""
    # This is more complicated than svn's GenerateDiff because we must convert
    # the diff output to include an svn-style "Index:" line as well as record
    # the hashes of the base files, so we can upload them along with our diff.
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
    svndiff = []
    filecount = 0
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/.*$", line)
      if match:
        filecount += 1
        filename = match.group(1)
        svndiff.append("Index: %s\n" % filename)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        # index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.", line)
        if match:
          self.base_hashes[filename] = match.group(1)
      # Every original diff line is preserved (after any injected Index line).
      svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(svndiff)

  def GetUnknownFiles(self):
    """Returns files git does not track (respecting ignore rules)."""
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for filename."""
    hash = self.base_hashes[filename]
    base_content = None
    new_content = None
    is_binary = False
    # An all-zero hash on the left side of the "index" line means the file
    # did not exist in the base revision.
    if hash == "0" * 40:  # All-zero hash indicates no base file.
      status = "A"
      base_content = ""
    else:
      status = "M"
      base_content, returncode = RunShellWithReturnCode(
          ["git", "show", hash])
      if returncode:
        ErrorExit("Got error status from 'git show %s'" % hash)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # Default base is the working directory's parent revision.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), filename
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    """Returns "hg diff --git" output rewritten into svn-style diff format."""
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for filename."""
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      # NOTE(review): file handle relies on CPython refcounting to be closed;
      # a with-block would be more robust.
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
      pertaining to filename.
  """
  patches = []
  current_name = None
  current_lines = []

  def flush():
    # Emit the accumulated section, if any lines were collected for a file.
    if current_name and current_lines:
      patches.append((current_name, ''.join(current_lines)))

  for line in data.splitlines(True):
    started_name = None
    if line.startswith('Index:'):
      started_name = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows.  Make them the
      # same otherwise the file shows up twice.
      candidate = line.split(':', 1)[1].strip().replace('\\', '/')
      if candidate != current_name:
        # File has property changes but no modifications, create a new diff.
        started_name = candidate
    if started_name:
      flush()
      current_name = started_name
      current_lines = [line]
    else:
      current_lines.append(line)
  flush()
  return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    # patch is a (filename, diff_text) pair.
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    # On success the server replies "OK" on the first line and the patch key
    # on the second.
    lines = response_body.splitlines()
    if not lines or lines[0] != "OK":
      StatusUpdate(" --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCS(options):
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an instance of the appropriate class. Exit with an
  error if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return MercurialVCS(options, out.strip())
  # NOTE: "except OSError, (errno, message)" is Python 2-only tuple
  # unpacking; errno == 2 is ENOENT (hg binary not installed).
  except OSError, (errno, message):
    if errno != 2:
      # ENOENT -- they don't have hg installed.
      raise

  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return SubversionVCS(options)

  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return GitVCS(options)
  except OSError, (errno, message):
    if errno != 2:
      # ENOENT -- they don't have git installed.
      raise

  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def RealMain(argv, data=None):
  """The real main function.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  # Force the C locale so VCS tool output (svn/hg/git) is parseable English.
  os.environ['LC_ALL'] = 'C'
  # 'parser' is the module-level optparse parser defined elsewhere in this
  # file; 'verbosity' is a module-level global consulted by helpers.
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  if isinstance(vcs, SubversionVCS):
    # base field is only allowed for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    base = vcs.GuessBase(options.download_base)
  else:
    base = None
  # Without a guessable base URL the server cannot fetch base files itself,
  # so fall back to uploading them from this client.
  if not base and options.download_base:
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    # May prompt the user interactively about files unknown to the VCS.
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  # Maps filename -> (base content, ...) for every file touched by the diff.
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  # Fall back to an interactive prompt when -m/--message was not given.
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options)
  # Multipart form fields accumulated below and POSTed to /upload.
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    for reviewer in options.reviewers.split(','):
      # NOTE(review): this sanity check rejects addresses whose domain has
      # more than one dot (e.g. user@mail.example.com) — presumably
      # intentional for the original deployment; confirm before reusing.
      if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      # Same domain sanity check as for reviewers above.
      if "@" in cc and not cc.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    # --description and --description_file are mutually exclusive.
    if options.description:
      ErrorExit("Can't specify description and description_file")
    # (Py2-era code: shadows the 'file' builtin and uses manual close.)
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      # info[0] is the base file content; encode as "md5:filename" pairs
      # joined by "|".
      checksum = md5.new(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    # Diff exceeds the single-request limit: send per-file patches later via
    # UploadSeparatePatches instead of inlining the whole diff here.
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # Server response format (when uploading content): line 0 is a status
    # message, line 1 the patchset id, remaining lines "<patch id> <filename>".
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  # Anything other than a success message means the upload failed; the server
  # already reported why via the message printed above.
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The success message ends with the issue URL; the id is its last component.
  issue = msg[msg.rfind("/")+1:]

  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    # Trigger the review-request email now that all content is uploaded.
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
def main():
  """Entry point: runs RealMain and turns Ctrl-C into a clean exit(1)."""
  try:
    RealMain(sys.argv)
  except KeyboardInterrupt:
    # Bare Python 2 'print' emits a newline to finish any partial output line.
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)
# Run the uploader only when executed as a script, not on import.
if __name__ == "__main__":
  main()
googletest/scripts/upload_gtest.py
deleted
100755 → 0
View file @
eb7e38df
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
--cc=googletestframework@googlegroups.com to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__
=
'wan@google.com (Zhanyong Wan)'
import
os
import
sys
CC_FLAG = '--cc='
GTEST_GROUP = 'googletestframework@googlegroups.com'


def add_gtest_group_to_cc(args):
  """Returns a copy of the flags with GTEST_GROUP guaranteed on --cc.

  Pure helper extracted from main() so the argv transformation is
  testable separately from the process replacement done by os.execv.

  Args:
    args: List of command-line flags destined for upload.py (without
        the program name).

  Returns:
    A new list in which every '--cc=' flag has GTEST_GROUP appended to
    its address list (unless already present); if no '--cc=' flag was
    given, a trailing '--cc=<GTEST_GROUP>' flag is added. All other
    flags pass through unchanged.
  """
  new_args = []
  found_cc_flag = False
  for arg in args:
    if arg.startswith(CC_FLAG):
      found_cc_flag = True
      # Split the user's cc list, dropping empty entries produced by
      # stray commas (e.g. "--cc=a@b.com,,").
      cc_list = [addr for addr in arg[len(CC_FLAG):].split(',') if addr]
      if GTEST_GROUP not in cc_list:
        cc_list.append(GTEST_GROUP)
      new_args.append(CC_FLAG + ','.join(cc_list))
    else:
      new_args.append(arg)
  if not found_cc_flag:
    new_args.append(CC_FLAG + GTEST_GROUP)
  return new_args


def main():
  """Re-executes upload.py with GTEST_GROUP added to the cc line.

  Never returns on success: os.execv replaces the current process.
  """
  # Finds the path to upload.py, assuming it is in the same directory
  # as this file.
  my_dir = os.path.dirname(os.path.abspath(__file__))
  upload_py_path = os.path.join(my_dir, 'upload.py')

  # Adds Google Test discussion group to the cc line if it's not there
  # already; argv[0] of the exec'ed process is upload.py itself.
  upload_py_argv = [upload_py_path] + add_gtest_group_to_cc(sys.argv[1:])

  # Invokes upload.py with the modified command line flags.
  os.execv(upload_py_path, upload_py_argv)
# Run the wrapper only when executed as a script, not on import.
if __name__ == '__main__':
  main()
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment