QueryR · Commits · e57b822b

Commit e57b822b, authored 7 years ago by Florian Angerer

Parse RUnit test protocol and compare test framework results to GnuR.

Parent: d12450b6
No related branches, tags, or merge requests found.

Showing 1 changed file: mx.fastr/mx_fastr_pkgs.py (+50, −28)
@@ -41,6 +41,7 @@ import mx
 import mx_fastr
 
 quiet = False
+verbose = 0
 graalvm = None
 
 def _fastr_suite_dir():
@@ -190,6 +191,7 @@ def pkgtest(args):
         --no-install            Do not install any packages (can only test installed packages).
         --list-versions         List packages to be installed/tested without installing/testing them.
         --pkg-pattern PATTERN   A regular expression to match packages.
+        --verbose, -v           Verbose output.
 
     Return codes:
         0: success
@@ -205,6 +207,12 @@ def pkgtest(args):
     if "--quiet" in args:
         global quiet
         quiet = True
+    if "-v" in args or "--verbose" in args:
+        global verbose
+        verbose = 1
+    elif "-V" in args:
+        global verbose
+        verbose = 2
 
     install_args = list(args)
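The hunk above introduces two verbosity levels: -v/--verbose selects level 1 and -V selects level 2, with 0 remaining the default. A minimal, self-contained sketch of that mapping (parse_verbosity is a hypothetical helper for illustration; the commit sets a module-level global instead):

def parse_verbosity(args):
    # Hypothetical helper mirroring the flag handling added in this hunk.
    if "-v" in args or "--verbose" in args:
        return 1
    elif "-V" in args:
        return 2
    return 0

print(parse_verbosity(["--pkg-pattern", "^MASS$", "-V"]))  # prints 2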
@@ -492,6 +500,13 @@ def _set_test_status(fastr_test_info):
         ok, skipped, failed = handle_output_file(fastr_testfile_status.abspath, fastr_content)
         if ok is not None:
             fastr_testfile_status.report = ok, skipped, failed
+            # If a test framework is used, also parse the summary generated by GnuR to compare numbers.
+            gnur_ok, gnur_skipped, gnur_failed = handle_output_file(gnur_testfile_status.abspath, gnur_content)
+            total_fastr = ok + skipped + failed
+            total_gnur = gnur_ok + gnur_skipped + gnur_failed
+            if total_fastr != total_gnur:
+                mx.log("Different number of tests executed. FastR = {} vs. GnuR = {}".format(total_fastr, total_gnur))
+                fastr_testfile_status.status = "FAILED"
         else:
             result, n_tests_passed, n_tests_failed = _fuzzy_compare(gnur_content, fastr_content, gnur_testfile_status.abspath, fastr_testfile_status.abspath, custom_filters=filters)
             if result == -1:
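The added block means: whenever a test framework summary is available for the FastR run, the matching GnuR output file is parsed as well, and if the two runs did not execute the same number of tests, the file is marked FAILED outright instead of being fuzzy-compared. A standalone sketch of that check with made-up counts (not part of the commit):

# handle_output_file returns a 3-tuple (ok, skipped, failed), or
# (None, None, None) when no known test framework was detected.
fastr_report = (18, 1, 1)  # hypothetical FastR counts
gnur_report = (19, 1, 1)   # hypothetical GnuR counts

total_fastr = sum(fastr_report)
total_gnur = sum(gnur_report)
if total_fastr != total_gnur:
    # Differing totals mean the two runs did not execute the same set of
    # tests, so a per-test comparison would be meaningless.
    print("Different number of tests executed. FastR = {} vs. GnuR = {}".format(total_fastr, total_gnur))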
@@ -543,25 +558,26 @@ def _set_test_status(fastr_test_info):
         print 'END checking ' + pkg
 
-def handle_output_file(file, test_output_file_contents):
+def handle_output_file(test_output_file, test_output_file_contents):
     """
     R package tests are usually distributed over several files. Each file can be interpreted as a test suite.
     This function parses the output file of all test suites and tries to detect if it used the testthat or RUnit.
     In this case, it parses the summary (number of passed, skipped, failed tests) of these test frameworks.
     If none of the frameworks is used, it performs an output diff and tries to determine, how many statements
     produces different output, i.e., every statement is considered to be a unit test.
-    :param file Path to the file.
-    :param test_output_file_contents: the lines of the output file
-    :return: A 3-tuple with the number of passed, skipped, and failed tests.
+    Returns a 3-tuple with the number of passed, skipped, and failed tests.
     """
-    mx.log("Detecting output type of {!s}".format(file))
+    mx.logv("Detecting output type of {!s}".format(test_output_file))
     for i in range(0, len(test_output_file_contents)):
-        if "testthat results" in test_output_file_contents[i]:
-            mx.log("Detected testthat summary in {!s}".format(file))
-            return _parse_testthat_result(test_output_file_contents, i)
-        elif "RUNIT TEST PROTOCOL" in test_output_file_contents[i]:
-            mx.log("Detected RUNIT test protocol in {!s}".format(file))
-            return _parse_runit_result(test_output_file_contents, i)
+        try:
+            if "testthat results" in test_output_file_contents[i]:
+                mx.log("Detected testthat summary in {!s}".format(test_output_file))
+                return _parse_testthat_result(test_output_file_contents, i)
+            elif "RUNIT TEST PROTOCOL" in test_output_file_contents[i]:
+                mx.log("Detected RUNIT test protocol in {!s}".format(test_output_file))
+                return _parse_runit_result(test_output_file_contents, i)
+        except BaseException as e:
+            mx.log("Error parsing test framework summary: " + str(e))
 
     # if this test did not use one of the known test frameworks, take the report from the fuzzy compare
     return None, None, None
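Framework detection rests on two literal markers in the captured output: testthat prints a summary line containing "testthat results", and RUnit prints a header containing "RUNIT TEST PROTOCOL". A minimal sketch of the detection loop in isolation (detect_framework is a hypothetical helper, not part of the commit):

def detect_framework(lines):
    # Scan output lines for the two framework markers used above.
    for i in range(len(lines)):
        if "testthat results" in lines[i]:
            return "testthat", i
        elif "RUNIT TEST PROTOCOL" in lines[i]:
            return "RUnit", i
    return None, -1

output = ["Loading required package: RUnit",
          "RUNIT TEST PROTOCOL -- Thu Feb 08 10:54:42 2018"]
print(detect_framework(output))  # prints ('RUnit', 1)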
@@ -583,19 +599,6 @@ def _parse_testthat_result(lines, i):
     raise Exception("Could not parse testthat status line {0}".format(result_line))
 
-def _parse_runit_result(lines, i):
-    '''
-    RUNIT TEST PROTOCOL -- Thu Feb 08 10:54:42 2018
-    ***********************************************
-    Number of test functions: 20
-    Number of errors: 0
-    Number of failures: 0
-    :param lines:
-    :param i:
-    :return:
-    '''
-
 
 def _testthat_parse_part(part):
     '''
     parses a part like "OK: 2"
@@ -607,6 +610,26 @@ def _testthat_parse_part(part):
     raise Exception("could not parse testthat status part {0}".format(part))
 
+def _parse_runit_result(lines, line_idx):
+    '''
+    RUNIT TEST PROTOCOL -- Thu Feb 08 10:54:42 2018
+    ***********************************************
+    Number of test functions: 20
+    Number of errors: 0
+    Number of failures: 0
+    '''
+    tests_total = 0
+    tests_failed = 0
+    for i in range(line_idx, len(lines)):
+        split_line = lines[i].split(":")
+        if len(split_line) >= 2:
+            if "Number of test functions" in split_line[0]:
+                tests_total = int(split_line[1])
+            elif "Number of errors" in split_line[0] or "Number of failures" in split_line[0]:
+                tests_failed = tests_failed + int(split_line[1])
+    return (tests_total - tests_failed, 0, tests_failed)
+
 
 def _find_start(content):
     marker = "Type 'q()' to quit R."
     for i in range(len(content)):
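As a sanity check, the new parser can be exercised on the sample protocol quoted in its docstring. The sketch below copies _parse_runit_result from the hunk above and feeds it made-up sample lines; note that errors and failures are counted together as failed:

def _parse_runit_result(lines, line_idx):
    # Copied from the hunk above; parses the RUnit summary block.
    tests_total = 0
    tests_failed = 0
    for i in range(line_idx, len(lines)):
        split_line = lines[i].split(":")
        if len(split_line) >= 2:
            if "Number of test functions" in split_line[0]:
                tests_total = int(split_line[1])
            elif "Number of errors" in split_line[0] or "Number of failures" in split_line[0]:
                tests_failed = tests_failed + int(split_line[1])
    return (tests_total - tests_failed, 0, tests_failed)

sample = [
    "RUNIT TEST PROTOCOL -- Thu Feb 08 10:54:42 2018",
    "***********************************************",
    "Number of test functions: 20",
    "Number of errors: 0",
    "Number of failures: 2",
]
print(_parse_runit_result(sample, 0))  # prints (18, 0, 2): ok, skipped, failed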
@@ -668,7 +691,7 @@ def _is_ignored_function(fun_name, gnur_content, gnur_stmt, fastr_content, fastr_stmt):
     return gnur_stmt != -1 and fun_name in gnur_content[gnur_stmt] and fastr_stmt != -1 and fun_name in fastr_content[fastr_stmt]
 
-def _fuzzy_compare(gnur_content, fastr_content, gnur_filename, fastr_filename, custom_filters=None, verbose=False):
+def _fuzzy_compare(gnur_content, fastr_content, gnur_filename, fastr_filename, custom_filters=None):
     """
     Compares the test output of GnuR and FastR by ignoring implementation-specific differences like header, error,
     and warning messages.
@@ -677,8 +700,7 @@ def _fuzzy_compare(gnur_content, fastr_content, gnur_filename, fastr_filename, custom_filters=None):
     statements passed and statements failed give the numbers on how many statements produced the same or a different
     output, respectively.
     """
-    if verbose:
-        mx.log("Using custom filters:\n" + str(custom_filters))
+    mx.logv("Using custom filters:\n" + str(custom_filters))
     gnur_content = _preprocess_content(gnur_content, custom_filters)
     fastr_content = _preprocess_content(fastr_content, custom_filters)
     gnur_start = _find_start(gnur_content)
@@ -949,7 +971,7 @@ def _parse_filter_file(file_path):
             try:
                 filters.append(_parse_filter(line))
             except InvalidFilterException as e:
-                print "invalid filter at line {!s}: {!s}".format(linenr, e
+                print "invalid filter at line {!s}: {!s}".format(linenr, e)
     return filters