Unverified Commit edadfecd authored by Akash Patel, committed by GitHub

Update gtest to 1.11.0 (#1086)

Properly resolves #1083, #996.
parent 26e3b704
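For orientation, a minimal sketch (not part of this commit) of the two user-facing options the updated tests below exercise, fail_fast and brief. The binary name and the command lines in the comments are illustrative.
#include "gtest/gtest.h"
// Three trivial tests to make the fail-fast behavior visible.
TEST(FailFastSketch, Passes) {}
TEST(FailFastSketch, Fails) { FAIL() << "First failure."; }
TEST(FailFastSketch, NotReachedUnderFailFast) {}
// Illustrative invocations:
//   ./failfast_sketch --gtest_fail_fast   # stop after the first failure; the rest are reported as skipped
//   GTEST_FAIL_FAST=1 ./failfast_sketch   # same behavior, via the environment variable
//   ./failfast_sketch --gtest_brief=1     # print output only for failing tests
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}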
......@@ -85,9 +85,12 @@ class GTestEnvVarTest(gtest_test_utils.TestCase):
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
SetEnvVar('TESTBRIDGE_TEST_RUNNER_FAIL_FAST', None) # For 'fail_fast' test
TestFlag('fail_fast', '1', '0')
TestFlag('filter', 'FooTest.Bar', '*')
SetEnvVar('XML_OUTPUT_FILE', None) # For 'output' test
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('brief', '1', '0')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
......
......@@ -72,6 +72,11 @@ void PrintFlag(const char* flag) {
return;
}
if (strcmp(flag, "fail_fast") == 0) {
cout << GTEST_FLAG(fail_fast);
return;
}
if (strcmp(flag, "filter") == 0) {
cout << GTEST_FLAG(filter);
return;
......@@ -82,6 +87,11 @@ void PrintFlag(const char* flag) {
return;
}
if (strcmp(flag, "brief") == 0) {
cout << GTEST_FLAG(brief);
return;
}
if (strcmp(flag, "print_time") == 0) {
cout << GTEST_FLAG(print_time);
return;
......
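The PrintFlag helper above reads the new flags through GTEST_FLAG. A minimal standalone sketch of the same accessors, assuming a gtest 1.11 build: after InitGoogleTest, the flags reflect the GTEST_FAIL_FAST and GTEST_BRIEF environment variables unless the corresponding --gtest_* flags override them, which is what gtest_env_var_test verifies.
#include <iostream>
#include "gtest/gtest.h"
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  // Each flag prints as 0 or 1; the defaults come from the GTEST_* environment
  // variables, and explicit --gtest_* flags take precedence over them.
  std::cout << "fail_fast=" << ::testing::GTEST_FLAG(fail_fast) << "\n";
  std::cout << "brief=" << ::testing::GTEST_FLAG(brief) << "\n";
  return 0;
}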
#!/usr/bin/env python
#
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test fail_fast.
A user can specify whether a Google Test program should continue test execution
after a test failure via the GTEST_FAIL_FAST environment variable or the
--gtest_fail_fast flag. The default value of the flag can also be changed
by the Bazel fail-fast environment variable TESTBRIDGE_TEST_RUNNER_FAIL_FAST.
This script tests such functionality by invoking googletest-failfast-unittest_
(a program written with Google Test) with different environments and command
line flags.
"""
import os
import gtest_test_utils
# Constants.
# The Bazel testbridge environment variable for fail fast.
BAZEL_FAIL_FAST_ENV_VAR = 'TESTBRIDGE_TEST_RUNNER_FAIL_FAST'
# The environment variable for specifying fail fast.
FAIL_FAST_ENV_VAR = 'GTEST_FAIL_FAST'
# The command line flag for specifying fail fast.
FAIL_FAST_FLAG = 'gtest_fail_fast'
# The command line flag to run disabled tests.
RUN_DISABLED_FLAG = 'gtest_also_run_disabled_tests'
# The command line flag for specifying a filter.
FILTER_FLAG = 'gtest_filter'
# Command to run the googletest-failfast-unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath(
'googletest-failfast-unittest_')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(test_suite=None, fail_fast=None, run_disabled=False):
"""Runs the test program and returns its output."""
args = []
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
'.GTestFailFastUnitTest.xml')
args += ['--gtest_output=xml:' + xml_path]
if fail_fast is not None:
if isinstance(fail_fast, str):
args += ['--%s=%s' % (FAIL_FAST_FLAG, fail_fast)]
elif fail_fast:
args += ['--%s' % FAIL_FAST_FLAG]
else:
args += ['--no%s' % FAIL_FAST_FLAG]
if test_suite:
args += ['--%s=%s.*' % (FILTER_FLAG, test_suite)]
if run_disabled:
args += ['--%s' % RUN_DISABLED_FLAG]
txt_out = gtest_test_utils.Subprocess([COMMAND] + args, env=environ).output
with open(xml_path) as xml_file:
return txt_out, xml_file.read()
# The unit test.
class GTestFailFastUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag for fail_fast."""
  def testDefaultBehavior(self):
    """Tests the default behavior when fail_fast is not specified."""
txt, _ = RunAndReturnOutput()
self.assertIn('22 FAILED TEST', txt)
def testGoogletestFlag(self):
txt, _ = RunAndReturnOutput(test_suite='HasSimpleTest', fail_fast=True)
self.assertIn('1 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 3 tests', txt)
txt, _ = RunAndReturnOutput(test_suite='HasSimpleTest', fail_fast=False)
self.assertIn('4 FAILED TEST', txt)
self.assertNotIn('[ SKIPPED ]', txt)
def testGoogletestEnvVar(self):
"""Tests the behavior of specifying fail_fast via Googletest env var."""
try:
SetEnvVar(FAIL_FAST_ENV_VAR, '1')
txt, _ = RunAndReturnOutput('HasSimpleTest')
self.assertIn('1 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 3 tests', txt)
SetEnvVar(FAIL_FAST_ENV_VAR, '0')
txt, _ = RunAndReturnOutput('HasSimpleTest')
self.assertIn('4 FAILED TEST', txt)
self.assertNotIn('[ SKIPPED ]', txt)
finally:
SetEnvVar(FAIL_FAST_ENV_VAR, None)
def testBazelEnvVar(self):
"""Tests the behavior of specifying fail_fast via Bazel testbridge."""
try:
SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, '1')
txt, _ = RunAndReturnOutput('HasSimpleTest')
self.assertIn('1 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 3 tests', txt)
SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, '0')
txt, _ = RunAndReturnOutput('HasSimpleTest')
self.assertIn('4 FAILED TEST', txt)
self.assertNotIn('[ SKIPPED ]', txt)
finally:
SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, None)
def testFlagOverridesEnvVar(self):
"""Tests precedence of flag over env var."""
try:
SetEnvVar(FAIL_FAST_ENV_VAR, '0')
txt, _ = RunAndReturnOutput('HasSimpleTest', True)
self.assertIn('1 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 3 tests', txt)
finally:
SetEnvVar(FAIL_FAST_ENV_VAR, None)
  def testGoogletestEnvVarOverridesBazelEnvVar(self):
    """Tests that the Googletest native env var overrides the Bazel env var."""
try:
SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, '0')
SetEnvVar(FAIL_FAST_ENV_VAR, '1')
txt, _ = RunAndReturnOutput('HasSimpleTest')
self.assertIn('1 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 3 tests', txt)
finally:
SetEnvVar(FAIL_FAST_ENV_VAR, None)
SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, None)
def testEventListener(self):
txt, _ = RunAndReturnOutput(test_suite='HasSkipTest', fail_fast=True)
self.assertIn('1 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 3 tests', txt)
for expected_count, callback in [(1, 'OnTestSuiteStart'),
(5, 'OnTestStart'),
(5, 'OnTestEnd'),
(5, 'OnTestPartResult'),
(1, 'OnTestSuiteEnd')]:
self.assertEqual(
expected_count, txt.count(callback),
'Expected %d calls to callback %s match count on output: %s ' %
(expected_count, callback, txt))
txt, _ = RunAndReturnOutput(test_suite='HasSkipTest', fail_fast=False)
self.assertIn('3 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 1 test', txt)
for expected_count, callback in [(1, 'OnTestSuiteStart'),
(5, 'OnTestStart'),
(5, 'OnTestEnd'),
(5, 'OnTestPartResult'),
(1, 'OnTestSuiteEnd')]:
self.assertEqual(
expected_count, txt.count(callback),
'Expected %d calls to callback %s match count on output: %s ' %
(expected_count, callback, txt))
def assertXmlResultCount(self, result, count, xml):
self.assertEqual(
count, xml.count('result="%s"' % result),
'Expected \'result="%s"\' match count of %s: %s ' %
(result, count, xml))
def assertXmlStatusCount(self, status, count, xml):
self.assertEqual(
count, xml.count('status="%s"' % status),
'Expected \'status="%s"\' match count of %s: %s ' %
(status, count, xml))
def assertFailFastXmlAndTxtOutput(self,
fail_fast,
test_suite,
passed_count,
failure_count,
skipped_count,
suppressed_count,
run_disabled=False):
"""Assert XML and text output of a test execution."""
txt, xml = RunAndReturnOutput(test_suite, fail_fast, run_disabled)
if failure_count > 0:
self.assertIn('%s FAILED TEST' % failure_count, txt)
if suppressed_count > 0:
self.assertIn('%s DISABLED TEST' % suppressed_count, txt)
if skipped_count > 0:
self.assertIn('[ SKIPPED ] %s tests' % skipped_count, txt)
self.assertXmlStatusCount('run',
passed_count + failure_count + skipped_count, xml)
self.assertXmlStatusCount('notrun', suppressed_count, xml)
self.assertXmlResultCount('completed', passed_count + failure_count, xml)
self.assertXmlResultCount('skipped', skipped_count, xml)
self.assertXmlResultCount('suppressed', suppressed_count, xml)
def assertFailFastBehavior(self,
test_suite,
passed_count,
failure_count,
skipped_count,
suppressed_count,
run_disabled=False):
"""Assert --fail_fast via flag."""
for fail_fast in ('true', '1', 't', True):
self.assertFailFastXmlAndTxtOutput(fail_fast, test_suite, passed_count,
failure_count, skipped_count,
suppressed_count, run_disabled)
def assertNotFailFastBehavior(self,
test_suite,
passed_count,
failure_count,
skipped_count,
suppressed_count,
run_disabled=False):
"""Assert --nofail_fast via flag."""
for fail_fast in ('false', '0', 'f', False):
self.assertFailFastXmlAndTxtOutput(fail_fast, test_suite, passed_count,
failure_count, skipped_count,
suppressed_count, run_disabled)
def testFlag_HasFixtureTest(self):
"""Tests the behavior of fail_fast and TEST_F."""
self.assertFailFastBehavior(
test_suite='HasFixtureTest',
passed_count=1,
failure_count=1,
skipped_count=3,
suppressed_count=0)
self.assertNotFailFastBehavior(
test_suite='HasFixtureTest',
passed_count=1,
failure_count=4,
skipped_count=0,
suppressed_count=0)
def testFlag_HasSimpleTest(self):
"""Tests the behavior of fail_fast and TEST."""
self.assertFailFastBehavior(
test_suite='HasSimpleTest',
passed_count=1,
failure_count=1,
skipped_count=3,
suppressed_count=0)
self.assertNotFailFastBehavior(
test_suite='HasSimpleTest',
passed_count=1,
failure_count=4,
skipped_count=0,
suppressed_count=0)
def testFlag_HasParametersTest(self):
"""Tests the behavior of fail_fast and TEST_P."""
self.assertFailFastBehavior(
test_suite='HasParametersSuite/HasParametersTest',
passed_count=0,
failure_count=1,
skipped_count=3,
suppressed_count=0)
self.assertNotFailFastBehavior(
test_suite='HasParametersSuite/HasParametersTest',
passed_count=0,
failure_count=4,
skipped_count=0,
suppressed_count=0)
def testFlag_HasDisabledTest(self):
"""Tests the behavior of fail_fast and Disabled test cases."""
self.assertFailFastBehavior(
test_suite='HasDisabledTest',
passed_count=1,
failure_count=1,
skipped_count=2,
suppressed_count=1,
run_disabled=False)
self.assertNotFailFastBehavior(
test_suite='HasDisabledTest',
passed_count=1,
failure_count=3,
skipped_count=0,
suppressed_count=1,
run_disabled=False)
def testFlag_HasDisabledRunDisabledTest(self):
"""Tests the behavior of fail_fast and Disabled test cases enabled."""
self.assertFailFastBehavior(
test_suite='HasDisabledTest',
passed_count=1,
failure_count=1,
skipped_count=3,
suppressed_count=0,
run_disabled=True)
self.assertNotFailFastBehavior(
test_suite='HasDisabledTest',
passed_count=1,
failure_count=4,
skipped_count=0,
suppressed_count=0,
run_disabled=True)
def testFlag_HasDisabledSuiteTest(self):
"""Tests the behavior of fail_fast and Disabled test suites."""
self.assertFailFastBehavior(
test_suite='DISABLED_HasDisabledSuite',
passed_count=0,
failure_count=0,
skipped_count=0,
suppressed_count=5,
run_disabled=False)
self.assertNotFailFastBehavior(
test_suite='DISABLED_HasDisabledSuite',
passed_count=0,
failure_count=0,
skipped_count=0,
suppressed_count=5,
run_disabled=False)
def testFlag_HasDisabledSuiteRunDisabledTest(self):
"""Tests the behavior of fail_fast and Disabled test suites enabled."""
self.assertFailFastBehavior(
test_suite='DISABLED_HasDisabledSuite',
passed_count=1,
failure_count=1,
skipped_count=3,
suppressed_count=0,
run_disabled=True)
self.assertNotFailFastBehavior(
test_suite='DISABLED_HasDisabledSuite',
passed_count=1,
failure_count=4,
skipped_count=0,
suppressed_count=0,
run_disabled=True)
if SUPPORTS_DEATH_TESTS:
def testFlag_HasDeathTest(self):
"""Tests the behavior of fail_fast and death tests."""
self.assertFailFastBehavior(
test_suite='HasDeathTest',
passed_count=1,
failure_count=1,
skipped_count=3,
suppressed_count=0)
self.assertNotFailFastBehavior(
test_suite='HasDeathTest',
passed_count=1,
failure_count=4,
skipped_count=0,
suppressed_count=0)
if __name__ == '__main__':
gtest_test_utils.Main()
// Copyright 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Unit test fixture for Google Test fail_fast.
//
// A user can specify whether a Google Test program should stop running the
// remaining tests after the first failure via either the GTEST_FAIL_FAST
// environment variable or the --gtest_fail_fast flag. This program provides
// the test cases used to exercise that functionality.
//
// The program will be invoked from a Python unit test. Don't run it
// directly.
#include "gtest/gtest.h"
namespace {
// Test HasFixtureTest.
class HasFixtureTest : public testing::Test {};
TEST_F(HasFixtureTest, Test0) {}
TEST_F(HasFixtureTest, Test1) { FAIL() << "Expected failure."; }
TEST_F(HasFixtureTest, Test2) { FAIL() << "Expected failure."; }
TEST_F(HasFixtureTest, Test3) { FAIL() << "Expected failure."; }
TEST_F(HasFixtureTest, Test4) { FAIL() << "Expected failure."; }
// Test HasSimpleTest.
TEST(HasSimpleTest, Test0) {}
TEST(HasSimpleTest, Test1) { FAIL() << "Expected failure."; }
TEST(HasSimpleTest, Test2) { FAIL() << "Expected failure."; }
TEST(HasSimpleTest, Test3) { FAIL() << "Expected failure."; }
TEST(HasSimpleTest, Test4) { FAIL() << "Expected failure."; }
// Test HasDisabledTest.
TEST(HasDisabledTest, Test0) {}
TEST(HasDisabledTest, DISABLED_Test1) { FAIL() << "Expected failure."; }
TEST(HasDisabledTest, Test2) { FAIL() << "Expected failure."; }
TEST(HasDisabledTest, Test3) { FAIL() << "Expected failure."; }
TEST(HasDisabledTest, Test4) { FAIL() << "Expected failure."; }
// Test HasDeathTest
TEST(HasDeathTest, Test0) { EXPECT_DEATH_IF_SUPPORTED(exit(1), ".*"); }
TEST(HasDeathTest, Test1) {
EXPECT_DEATH_IF_SUPPORTED(FAIL() << "Expected failure.", ".*");
}
TEST(HasDeathTest, Test2) {
EXPECT_DEATH_IF_SUPPORTED(FAIL() << "Expected failure.", ".*");
}
TEST(HasDeathTest, Test3) {
EXPECT_DEATH_IF_SUPPORTED(FAIL() << "Expected failure.", ".*");
}
TEST(HasDeathTest, Test4) {
EXPECT_DEATH_IF_SUPPORTED(FAIL() << "Expected failure.", ".*");
}
// Test DISABLED_HasDisabledSuite
TEST(DISABLED_HasDisabledSuite, Test0) {}
TEST(DISABLED_HasDisabledSuite, Test1) { FAIL() << "Expected failure."; }
TEST(DISABLED_HasDisabledSuite, Test2) { FAIL() << "Expected failure."; }
TEST(DISABLED_HasDisabledSuite, Test3) { FAIL() << "Expected failure."; }
TEST(DISABLED_HasDisabledSuite, Test4) { FAIL() << "Expected failure."; }
// Test HasParametersTest
class HasParametersTest : public testing::TestWithParam<int> {};
TEST_P(HasParametersTest, Test1) { FAIL() << "Expected failure."; }
TEST_P(HasParametersTest, Test2) { FAIL() << "Expected failure."; }
INSTANTIATE_TEST_SUITE_P(HasParametersSuite, HasParametersTest,
testing::Values(1, 2));
class MyTestListener : public ::testing::EmptyTestEventListener {
void OnTestSuiteStart(const ::testing::TestSuite& test_suite) override {
printf("We are in OnTestSuiteStart of %s.\n", test_suite.name());
}
void OnTestStart(const ::testing::TestInfo& test_info) override {
printf("We are in OnTestStart of %s.%s.\n", test_info.test_suite_name(),
test_info.name());
}
void OnTestPartResult(
const ::testing::TestPartResult& test_part_result) override {
printf("We are in OnTestPartResult %s:%d.\n", test_part_result.file_name(),
test_part_result.line_number());
}
void OnTestEnd(const ::testing::TestInfo& test_info) override {
printf("We are in OnTestEnd of %s.%s.\n", test_info.test_suite_name(),
test_info.name());
}
void OnTestSuiteEnd(const ::testing::TestSuite& test_suite) override {
printf("We are in OnTestSuiteEnd of %s.\n", test_suite.name());
}
};
TEST(HasSkipTest, Test0) { SUCCEED() << "Expected success."; }
TEST(HasSkipTest, Test1) { GTEST_SKIP() << "Expected skip."; }
TEST(HasSkipTest, Test2) { FAIL() << "Expected failure."; }
TEST(HasSkipTest, Test3) { FAIL() << "Expected failure."; }
TEST(HasSkipTest, Test4) { FAIL() << "Expected failure."; }
} // namespace
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
::testing::UnitTest::GetInstance()->listeners().Append(new MyTestListener());
return RUN_ALL_TESTS();
}
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
......@@ -28,51 +25,48 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gmock.py v0.1.0 -- uploads a Google Mock patch for review.
This simple wrapper passes all command line flags and
--cc=googlemock@googlegroups.com to upload.py.
USAGE: upload_gmock.py [options for upload.py]
"""Unit test for Google Test's global test environment behavior.
A user can specify a global test environment via
testing::AddGlobalTestEnvironment. Failures in the global environment should
result in all unit tests being skipped.
This script tests such functionality by invoking
googletest-global-environment-unittest_ (a program written with Google Test).
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GMOCK_GROUP = 'googlemock@googlegroups.com'
def main():
  # Finds the path to upload.py, assuming it is in the same directory
  # as this file.
  my_dir = os.path.dirname(os.path.abspath(__file__))
  upload_py_path = os.path.join(my_dir, 'upload.py')
  # Adds Google Mock discussion group to the cc line if it's not there
  # already.
  upload_py_argv = [upload_py_path]
  found_cc_flag = False
  for arg in sys.argv[1:]:
    if arg.startswith(CC_FLAG):
      found_cc_flag = True
      cc_line = arg[len(CC_FLAG):]
      cc_list = [addr for addr in cc_line.split(',') if addr]
      if GMOCK_GROUP not in cc_list:
        cc_list.append(GMOCK_GROUP)
      upload_py_argv.append(CC_FLAG + ','.join(cc_list))
    else:
      upload_py_argv.append(arg)
  if not found_cc_flag:
    upload_py_argv.append(CC_FLAG + GMOCK_GROUP)
  # Invokes upload.py with the modified command line flags.
  os.execv(upload_py_path, upload_py_argv)
import gtest_test_utils
def RunAndReturnOutput():
  """Runs the test program and returns its output."""
  return gtest_test_utils.Subprocess([
      gtest_test_utils.GetTestExecutablePath(
          'googletest-global-environment-unittest_')
  ]).output
class GTestGlobalEnvironmentUnitTest(gtest_test_utils.TestCase):
  """Tests global test environment failures."""
  def testEnvironmentSetUpFails(self):
    """Tests the behavior when the global environment's SetUp fails."""
    # Run the test.
    txt = RunAndReturnOutput()
    # We should see the text of the global environment setup error.
    self.assertIn('Canned environment setup error', txt)
    # Our test should have been skipped due to the error, and not treated as a
    # pass.
    self.assertIn('[ SKIPPED ] 1 test', txt)
    self.assertIn('[ PASSED ] 0 tests', txt)
    # The test case shouldn't have been run.
    self.assertNotIn('Unexpected call', txt)
if __name__ == '__main__':
  main()
  gtest_test_utils.Main()
// Copyright 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Unit test for Google Test global test environments.
//
// The program will be invoked from a Python unit test. Don't run it
// directly.
#include "gtest/gtest.h"
namespace {
// An environment that always fails in its SetUp method.
class FailingEnvironment final : public ::testing::Environment {
public:
void SetUp() override { FAIL() << "Canned environment setup error"; }
};
// Register the environment.
auto* const g_environment_ =
::testing::AddGlobalTestEnvironment(new FailingEnvironment);
// A test that doesn't actually run.
TEST(SomeTest, DoesFoo) { FAIL() << "Unexpected call"; }
} // namespace
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
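For contrast with the always-failing environment above, a minimal sketch (class and test names are illustrative) of the usual global-environment pattern. Per the new Python test, a fatal failure in a global environment's SetUp now causes the registered tests to be reported as skipped rather than silently passed.
#include "gtest/gtest.h"
class SharedResourceEnvironment : public ::testing::Environment {
 public:
  void SetUp() override {
    // Acquire shared resources here; a fatal failure here skips every test.
  }
  void TearDown() override {
    // Release shared resources here; runs once after all tests finish.
  }
};
// Google Test takes ownership of the registered environment.
::testing::Environment* const kSharedEnv =
    ::testing::AddGlobalTestEnvironment(new SharedResourceEnvironment);
TEST(UsesSharedResource, Works) { SUCCEED(); }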
......@@ -58,9 +58,9 @@ else:
EXPECTED_NON_EMPTY = {
u'tests':
24,
26,
u'failures':
4,
5,
u'disabled':
2,
u'errors':
......@@ -158,9 +158,9 @@ EXPECTED_NON_EMPTY = {
u'name':
u'SkippedTest',
u'tests':
1,
3,
u'failures':
0,
1,
u'disabled':
0,
u'errors':
......@@ -176,6 +176,32 @@ EXPECTED_NON_EMPTY = {
u'time': u'*',
u'timestamp': u'*',
u'classname': u'SkippedTest'
}, {
u'name': u'SkippedWithMessage',
u'status': u'RUN',
u'result': u'SKIPPED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'SkippedTest'
}, {
u'name':
u'SkippedAfterFailure',
u'status':
u'RUN',
u'result':
u'COMPLETED',
u'time':
u'*',
u'timestamp':
u'*',
u'classname':
u'SkippedTest',
u'failures': [{
u'failure': u'gtest_xml_output_unittest_.cc:*\n'
u'Expected equality of these values:\n'
u' 1\n 2' + STACK_TRACE_TEMPLATE,
u'type': u''
}]
}]
}, {
u'name':
......@@ -586,15 +612,59 @@ EXPECTED_FILTERED = {
}],
}
EXPECTED_EMPTY = {
u'tests': 0,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'timestamp': u'*',
u'name': u'AllTests',
u'testsuites': [],
EXPECTED_NO_TEST = {
u'tests':
0,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'name':
u'AllTests',
u'testsuites': [{
u'name':
u'NonTestSuiteFailure',
u'tests':
1,
u'failures':
1,
u'disabled':
0,
u'skipped':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name':
u'',
u'status':
u'RUN',
u'result':
u'COMPLETED',
u'time':
u'*',
u'timestamp':
u'*',
u'classname':
u'',
u'failures': [{
u'failure': u'gtest_no_test_unittest.cc:*\n'
u'Expected equality of these values:\n'
u' 1\n 2' + STACK_TRACE_TEMPLATE,
u'type': u'',
}]
}]
}],
}
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
......@@ -619,14 +689,14 @@ class GTestJsonOutputUnitTest(gtest_test_utils.TestCase):
"""
self._TestJsonOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY, 1)
def testEmptyJsonOutput(self):
def testNoTestJsonOutput(self):
"""Verifies JSON output for a Google Test binary without actual tests.
Runs a test program that generates an empty JSON output, and
tests that the JSON output is expected.
    Runs a test program that generates JSON output for a binary with no
    tests, and verifies that the JSON output is as expected.
"""
self._TestJsonOutput('gtest_no_test_unittest', EXPECTED_EMPTY, 0)
self._TestJsonOutput('gtest_no_test_unittest', EXPECTED_NO_TEST, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the JSON output is valid.
......
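The new EXPECTED_NO_TEST expectations describe a failure recorded outside any TEST: it shows up in the JSON report under a synthetic NonTestSuiteFailure suite with an empty test name. A minimal sketch of how such an ad-hoc failure can arise; treating this as what gtest_no_test_unittest.cc does is an assumption, and the real binary's exit-code handling is not reproduced here.
#include "gtest/gtest.h"
// Hypothetical reproduction: no TEST() is defined in this binary.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  // An assertion evaluated outside any test body is recorded as an ad-hoc
  // result and is reported under the "NonTestSuiteFailure" suite.
  EXPECT_EQ(1, 2);
  return RUN_ALL_TESTS();
}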
......@@ -42,6 +42,9 @@
# include <windows.h>
#elif GTEST_OS_WINDOWS
# include <direct.h>
#elif GTEST_OS_OS2
// For strcasecmp on OS/2
#include <strings.h>
#endif // GTEST_OS_WINDOWS_MOBILE
#include "src/gtest-internal-inl.h"
......
......@@ -12,7 +12,7 @@ Expected equality of these values:
3
Stack trace: (omitted)
[==========] Running 85 tests from 40 test suites.
[==========] Running 88 tests from 41 test suites.
[----------] Global test environment set-up.
FooEnvironment::SetUp() called.
BarEnvironment::SetUp() called.
......@@ -982,6 +982,43 @@ Expected failure
Stack trace: (omitted)
[ FAILED ] PrintingStrings/ParamTest.Failure/a, where GetParam() = "a"
[----------] 3 tests from GoogleTestVerification
[ RUN ] GoogleTestVerification.UninstantiatedParameterizedTestSuite<NoTests>
googletest-output-test_.cc:#: Failure
Parameterized test suite NoTests is instantiated via INSTANTIATE_TEST_SUITE_P, but no tests are defined via TEST_P . No test cases will run.
Ideally, INSTANTIATE_TEST_SUITE_P should only ever be invoked from code that always depend on code that provides TEST_P. Failing to do so is often an indication of dead code, e.g. the last TEST_P was removed but the rest got left behind.
To suppress this error for this test suite, insert the following line (in a non-header) in the namespace it is defined in:
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(NoTests);
Stack trace: (omitted)
[ FAILED ] GoogleTestVerification.UninstantiatedParameterizedTestSuite<NoTests>
[ RUN ] GoogleTestVerification.UninstantiatedParameterizedTestSuite<DetectNotInstantiatedTest>
googletest-output-test_.cc:#: Failure
Parameterized test suite DetectNotInstantiatedTest is defined via TEST_P, but never instantiated. None of the test cases will run. Either no INSTANTIATE_TEST_SUITE_P is provided or the only ones provided expand to nothing.
Ideally, TEST_P definitions should only ever be included as part of binaries that intend to use them. (As opposed to, for example, being placed in a library that may be linked in to get other utilities.)
To suppress this error for this test suite, insert the following line (in a non-header) in the namespace it is defined in:
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(DetectNotInstantiatedTest);
Stack trace: (omitted)
[ FAILED ] GoogleTestVerification.UninstantiatedParameterizedTestSuite<DetectNotInstantiatedTest>
[ RUN ] GoogleTestVerification.UninstantiatedTypeParameterizedTestSuite<DetectNotInstantiatedTypesTest>
googletest-output-test_.cc:#: Failure
Type parameterized test suite DetectNotInstantiatedTypesTest is defined via REGISTER_TYPED_TEST_SUITE_P, but never instantiated via INSTANTIATE_TYPED_TEST_SUITE_P. None of the test cases will run.
Ideally, TYPED_TEST_P definitions should only ever be included as part of binaries that intend to use them. (As opposed to, for example, being placed in a library that may be linked in to get other utilities.)
To suppress this error for this test suite, insert the following line (in a non-header) in the namespace it is defined in:
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(DetectNotInstantiatedTypesTest);
Stack trace: (omitted)
[ FAILED ] GoogleTestVerification.UninstantiatedTypeParameterizedTestSuite<DetectNotInstantiatedTypesTest>
[----------] Global test environment tear-down
BarEnvironment::TearDown() called.
googletest-output-test_.cc:#: Failure
......@@ -995,9 +1032,9 @@ Failed
Expected fatal failure.
Stack trace: (omitted)
[==========] 85 tests from 40 test suites ran.
[==========] 88 tests from 41 test suites ran.
[ PASSED ] 31 tests.
[ FAILED ] 54 tests, listed below:
[ FAILED ] 57 tests, listed below:
[ FAILED ] NonfatalFailureTest.EscapesStringOperands
[ FAILED ] NonfatalFailureTest.DiffForLongStrings
[ FAILED ] FatalFailureTest.FatalFailureInSubroutine
......@@ -1052,8 +1089,11 @@ Stack trace: (omitted)
[ FAILED ] BadDynamicFixture2.Derived
[ FAILED ] PrintingFailingParams/FailingParamTest.Fails/0, where GetParam() = 2
[ FAILED ] PrintingStrings/ParamTest.Failure/a, where GetParam() = "a"
[ FAILED ] GoogleTestVerification.UninstantiatedParameterizedTestSuite<NoTests>
[ FAILED ] GoogleTestVerification.UninstantiatedParameterizedTestSuite<DetectNotInstantiatedTest>
[ FAILED ] GoogleTestVerification.UninstantiatedTypeParameterizedTestSuite<DetectNotInstantiatedTypesTest>
54 FAILED TESTS
57 FAILED TESTS
  YOU HAVE 1 DISABLED TEST
Note: Google Test filter = FatalFailureTest.*:LoggingTest.*
......
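Each of the three new GoogleTestVerification failures in the golden output above ends with a suggested suppression line. A minimal sketch (suite and test names are illustrative) of the two ways to resolve the value-parameterized case:
#include "gtest/gtest.h"
class OptionalFeatureTest : public ::testing::TestWithParam<int> {};
TEST_P(OptionalFeatureTest, Works) { EXPECT_GE(GetParam(), 0); }
// Resolution 1: instantiate the suite somewhere in the binary.
INSTANTIATE_TEST_SUITE_P(Basic, OptionalFeatureTest, ::testing::Values(0, 1));
// Resolution 2 (instead of 1): if the suite is intentionally left
// uninstantiated in this binary, suppress the verification failure in the
// namespace that defines the suite, in non-header code:
// GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(OptionalFeatureTest);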
......@@ -29,7 +29,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing and Mocking Framework.
r"""Tests the text output of Google C++ Testing and Mocking Framework.
To update the golden file:
googletest_output_test.py --build_dir=BUILD/DIR --gengolden
......@@ -331,7 +331,7 @@ if __name__ == '__main__':
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.write(output.encode())
golden_file.close()
else:
message = (
......
......@@ -476,63 +476,6 @@ TEST(GtestFailAtTest, MessageContainsSpecifiedFileAndLineNumber) {
GTEST_FAIL_AT("foo.cc", 42) << "Expected fatal failure in foo.cc";
}
#if GTEST_IS_THREADSAFE
// A unary function that may die.
void DieIf(bool should_die) {
GTEST_CHECK_(!should_die) << " - death inside DieIf().";
}
// Tests running death tests in a multi-threaded context.
// Used for coordination between the main and the spawn thread.
struct SpawnThreadNotifications {
SpawnThreadNotifications() {}
Notification spawn_thread_started;
Notification spawn_thread_ok_to_terminate;
private:
GTEST_DISALLOW_COPY_AND_ASSIGN_(SpawnThreadNotifications);
};
// The function to be executed in the thread spawn by the
// MultipleThreads test (below).
static void ThreadRoutine(SpawnThreadNotifications* notifications) {
// Signals the main thread that this thread has started.
notifications->spawn_thread_started.Notify();
// Waits for permission to finish from the main thread.
notifications->spawn_thread_ok_to_terminate.WaitForNotification();
}
// This is a death-test test, but it's not named with a DeathTest
// suffix. It starts threads which might interfere with later
// death tests, so it must run after all other death tests.
class DeathTestAndMultiThreadsTest : public testing::Test {
protected:
// Starts a thread and waits for it to begin.
void SetUp() override {
thread_.reset(new ThreadWithParam<SpawnThreadNotifications*>(
&ThreadRoutine, &notifications_, nullptr));
notifications_.spawn_thread_started.WaitForNotification();
}
// Tells the thread to finish, and reaps it.
// Depending on the version of the thread library in use,
// a manager thread might still be left running that will interfere
// with later death tests. This is unfortunate, but this class
// cleans up after itself as best it can.
void TearDown() override {
notifications_.spawn_thread_ok_to_terminate.Notify();
}
private:
SpawnThreadNotifications notifications_;
std::unique_ptr<ThreadWithParam<SpawnThreadNotifications*> > thread_;
};
#endif // GTEST_IS_THREADSAFE
// The MixedUpTestSuiteTest test case verifies that Google Test will fail a
// test if it uses a different fixture class than what other tests in
// the same test case use. It deliberately contains two fixture
......@@ -790,8 +733,16 @@ INSTANTIATE_TEST_SUITE_P(PrintingStrings,
testing::Values(std::string("a")),
ParamNameFunc);
// This #ifdef block tests the output of typed tests.
#if GTEST_HAS_TYPED_TEST
// The case where a suite has INSTANTIATE_TEST_SUITE_P but not TEST_P.
using NoTests = ParamTest;
INSTANTIATE_TEST_SUITE_P(ThisIsOdd, NoTests, ::testing::Values("Hello"));
// fails under kErrorOnUninstantiatedParameterizedTest=true
class DetectNotInstantiatedTest : public testing::TestWithParam<int> {};
TEST_P(DetectNotInstantiatedTest, Used) { }
// This would make the test failure from the above go away.
// INSTANTIATE_TEST_SUITE_P(Fix, DetectNotInstantiatedTest, testing::Values(1));
template <typename T>
class TypedTest : public testing::Test {
......@@ -829,11 +780,6 @@ TYPED_TEST(TypedTestWithNames, Success) {}
TYPED_TEST(TypedTestWithNames, Failure) { FAIL(); }
#endif // GTEST_HAS_TYPED_TEST
// This #ifdef block tests the output of type-parameterized tests.
#if GTEST_HAS_TYPED_TEST_P
template <typename T>
class TypedTestP : public testing::Test {
};
......@@ -869,7 +815,20 @@ class TypedTestPNames {
INSTANTIATE_TYPED_TEST_SUITE_P(UnsignedCustomName, TypedTestP, UnsignedTypes,
TypedTestPNames);
#endif // GTEST_HAS_TYPED_TEST_P
template <typename T>
class DetectNotInstantiatedTypesTest : public testing::Test {};
TYPED_TEST_SUITE_P(DetectNotInstantiatedTypesTest);
TYPED_TEST_P(DetectNotInstantiatedTypesTest, Used) {
TypeParam instantiate;
(void)instantiate;
}
REGISTER_TYPED_TEST_SUITE_P(DetectNotInstantiatedTypesTest, Used);
// kErrorOnUninstantiatedTypeParameterizedTest=true would make the above fail.
// Adding the following would make that test failure go away.
//
// typedef ::testing::Types<char, int, unsigned int> MyTypes;
// INSTANTIATE_TYPED_TEST_SUITE_P(All, DetectNotInstantiatedTypesTest, MyTypes);
#if GTEST_HAS_DEATH_TEST
......@@ -879,8 +838,6 @@ INSTANTIATE_TYPED_TEST_SUITE_P(UnsignedCustomName, TypedTestP, UnsignedTypes,
TEST(ADeathTest, ShouldRunFirst) {
}
# if GTEST_HAS_TYPED_TEST
// We rely on the golden file to verify that typed tests whose test
// case name ends with DeathTest are run first.
......@@ -894,10 +851,6 @@ TYPED_TEST_SUITE(ATypedDeathTest, NumericTypes);
TYPED_TEST(ATypedDeathTest, ShouldRunFirst) {
}
# endif // GTEST_HAS_TYPED_TEST
# if GTEST_HAS_TYPED_TEST_P
// We rely on the golden file to verify that type-parameterized tests
// whose test case name ends with DeathTest are run first.
......@@ -915,8 +868,6 @@ REGISTER_TYPED_TEST_SUITE_P(ATypeParamDeathTest, ShouldRunFirst);
INSTANTIATE_TYPED_TEST_SUITE_P(My, ATypeParamDeathTest, NumericTypes);
# endif // GTEST_HAS_TYPED_TEST_P
#endif // GTEST_HAS_DEATH_TEST
// Tests various failure conditions of
......@@ -1078,7 +1029,7 @@ auto dynamic_test = (
"BadDynamicFixture1", "TestBase", nullptr, nullptr, __FILE__, __LINE__,
[]() -> testing::Test* { return new DynamicTest<true>; }),
// Register two tests with the same fixture incorrectly by ommiting the
// Register two tests with the same fixture incorrectly by omitting the
// return type.
testing::RegisterTest(
"BadDynamicFixture2", "FixtureBase", nullptr, nullptr, __FILE__,
......
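The commented-out fix at the end of the DetectNotInstantiatedTypesTest block above is the pattern for type-parameterized suites. A minimal, self-contained sketch (names are illustrative) of a type-parameterized suite that is registered and instantiated, so it does not trigger the new verification failure:
#include "gtest/gtest.h"
template <typename T>
class ContainerLikeTest : public ::testing::Test {};
TYPED_TEST_SUITE_P(ContainerLikeTest);
TYPED_TEST_P(ContainerLikeTest, DefaultConstructs) {
  TypeParam value{};
  (void)value;
}
REGISTER_TYPED_TEST_SUITE_P(ContainerLikeTest, DefaultConstructs);
// Instantiating for concrete types is what keeps GoogleTestVerification quiet.
using MyTypes = ::testing::Types<char, int, unsigned int>;
INSTANTIATE_TYPED_TEST_SUITE_P(All, ContainerLikeTest, MyTypes);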