commit 4be065595e
@@ -0,0 +1 @@
|
||||
# -*- coding: utf-8 -*-
|
@@ -0,0 +1,17 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import system_tests
|
||||
|
||||
|
||||
class CVE_2017_14680(system_tests.Case):
|
||||
|
||||
bug_no = "73"
|
||||
url = "https://github.com/Exiv2/exiv2/issues/73"
|
||||
|
||||
filename = "{data_path}/003-heap-buffer-over"
|
||||
commands = ["{exiv2} " + filename]
|
||||
stdout = [""]
|
||||
stderr = ["""{exiv2_exception_msg} """ + filename + """:
|
||||
{error_58_message}
|
||||
"""]
|
||||
retval = [1]
|
@@ -0,0 +1,15 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import system_tests
|
||||
|
||||
|
||||
class CVE_2017_14857(system_tests.Case):
|
||||
|
||||
filename = "{data_path}/010_bad_free"
|
||||
commands = ["{exiv2} " + filename]
|
||||
retval = [1]
|
||||
stdout = [""]
|
||||
stderr = [
|
||||
"""{exiv2_exception_msg} """ + filename + """:
|
||||
{error_57_message}
|
||||
"""]
|
@@ -0,0 +1,50 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import system_tests
|
||||
|
||||
|
||||
class TamronSupport(system_tests.Case):
|
||||
|
||||
description = "Added support for 'Tamron SP 15-30mm f/2.8 Di VC USD A012' and 'Tamron SP 90mm f/2.8 Di VC USD MACRO1:1'"
|
||||
|
||||
files = [
|
||||
"exiv2-g20.exv",
|
||||
"CanonEF100mmF2.8LMacroISUSM.exv",
|
||||
"TamronSP15-30mmF2.8DiVCUSDA012.exv",
|
||||
"TamronSP90mmF2.8DiVCUSDMacroF004.exv",
|
||||
"TamronSP90mmF2.8DiVCUSDMacroF017.exv"
|
||||
]
|
||||
commands = ["{exiv2} -pa --grep lens/i ../../../test/data/" + files[0]] \
|
||||
+ list(map(
|
||||
lambda fname: "{exiv2} -pa --grep lenstype/i ../../../test/data/" + fname,
|
||||
files[1:]
|
||||
))
|
||||
retval = [0] * len(files)
|
||||
|
||||
stdout = [
|
||||
# exiv2-g20.exv
|
||||
"""Exif.CanonCs.LensType Short 1 Tamron SP 90mm f/2.8 Di VC USD Macro 1:1
|
||||
Exif.CanonCs.Lens Short 3 90.0 mm
|
||||
Exif.Canon.LensModel Ascii 70 TAMRON SP 90mm F/2.8 Di VC USD MACRO1:1 F017
|
||||
Exif.Photo.LensSpecification Rational 4 90/1 90/1 0/0 0/0
|
||||
Exif.Photo.LensModel Ascii 70 TAMRON SP 90mm F/2.8 Di VC USD MACRO1:1 F017
|
||||
Exif.Photo.LensSerialNumber Ascii 12 0000000000
|
||||
""",
|
||||
|
||||
# CanonEF100mmF2.8LMacroISUSM.exv
|
||||
"""Exif.CanonCs.LensType Short 1 Canon EF 100mm f/2.8L Macro IS USM
|
||||
""",
|
||||
|
||||
# TamronSP15-30mmF2.8DiVCUSDA012.exv
|
||||
"""Exif.CanonCs.LensType Short 1 Tamron SP 15-30mm f/2.8 Di VC USD A012
|
||||
""",
|
||||
|
||||
# TamronSP90mmF2.8DiVCUSDMacroF004.exv
|
||||
"""Exif.CanonCs.LensType Short 1 Tamron SP 90mm f/2.8 Di VC USD Macro 1:1
|
||||
""",
|
||||
|
||||
# TamronSP90mmF2.8DiVCUSDMacroF017.exv
|
||||
"""Exif.CanonCs.LensType Short 1 Tamron SP 90mm f/2.8 Di VC USD Macro 1:1
|
||||
"""
|
||||
""]
|
||||
stderr = [""] * len(files)
|
@@ -0,0 +1,86 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import system_tests
|
||||
|
||||
|
||||
class Issue1305Test(system_tests.Case):
|
||||
err_msg_dir_img = """Warning: Directory Image, entry 0x3030 has unknown Exif (TIFF) type 12336; setting type size 1.
|
||||
Error: Directory Image, entry 0x3030 has invalid size 808464432*1; skipping entry.
|
||||
"""
|
||||
|
||||
err_msg_dir_ph = """Warning: Directory Photo, entry 0x3030 has unknown Exif (TIFF) type 12336; setting type size 1.
|
||||
Error: Directory Photo, entry 0x3030 has invalid size 808464432*1; skipping entry.
|
||||
"""
|
||||
|
||||
err_msg_dir_pentax = """Warning: Directory Pentax, entry 0x3030 has unknown Exif (TIFF) type 12336; setting type size 1.
|
||||
Error: Directory Pentax, entry 0x3030 has invalid size 808464432*1; skipping entry.
|
||||
"""
|
||||
|
||||
name = "regression test for issue 1305"
|
||||
url = "http://dev.exiv2.org/issues/1305"
|
||||
|
||||
filename = "{data_path}/IMGP0006-min.jpg"
|
||||
commands = ["{exiv2} " + filename]
|
||||
stdout = ["""File name : """ + filename + """
|
||||
File size : 12341 Bytes
|
||||
MIME type : image/jpeg
|
||||
Image size : 0 x 0
|
||||
Camera make : PENTAX000000000000000000000000000000000000000000
|
||||
Camera model :
|
||||
Image timestamp :
|
||||
Image number :
|
||||
Exposure time :
|
||||
Aperture :
|
||||
Exposure bias :
|
||||
Flash :
|
||||
Flash bias :
|
||||
Focal length :
|
||||
Subject distance:
|
||||
ISO speed :
|
||||
Exposure mode :
|
||||
Metering mode :
|
||||
Macro mode :
|
||||
Image quality :
|
||||
Exif Resolution :
|
||||
White balance :
|
||||
Thumbnail : None
|
||||
Copyright :
|
||||
Exif comment :
|
||||
|
||||
"""]
|
||||
|
||||
stderr = [
|
||||
"""Error: Directory Image: Next pointer is out of bounds; ignored.
|
||||
"""
|
||||
+ 8 * err_msg_dir_img
|
||||
+ """Warning: Directory Photo has an unexpected next pointer; ignored.
|
||||
"""
|
||||
+ 13 * err_msg_dir_ph
|
||||
+ """Warning: Directory Photo, entry 0x927c has unknown Exif (TIFF) type 12336; setting type size 1.
|
||||
Warning: Directory Pentax has an unexpected next pointer; ignored.
|
||||
"""
|
||||
+ 6 * err_msg_dir_pentax
|
||||
+ """Warning: Directory Pentax, entry 0x0006 has unknown Exif (TIFF) type 12336; setting type size 1.
|
||||
Warning: Directory Pentax, entry 0x0007 has unknown Exif (TIFF) type 12336; setting type size 1.
|
||||
"""
|
||||
+ 39 * err_msg_dir_pentax
|
||||
+ 23 * err_msg_dir_ph
|
||||
+ """Warning: Directory Photo, entry 0x3030 has unknown Exif (TIFF) type 48; setting type size 1.
|
||||
Error: Directory Photo, entry 0x3030 has invalid size 808464432*1; skipping entry.
|
||||
"""
|
||||
+ 5 * err_msg_dir_ph
|
||||
+ """Warning: Directory Photo, entry 0x3030 has unknown Exif (TIFF) type 12336; setting type size 1.
|
||||
Error: Directory Photo, entry 0x3030 has invalid size 808452102*1; skipping entry.
|
||||
Warning: Directory Photo, entry 0x3030 has unknown Exif (TIFF) type 12336; setting type size 1.
|
||||
Error: Directory Photo, entry 0x3030 has invalid size 808452103*1; skipping entry.
|
||||
"""
|
||||
+ 3 * err_msg_dir_ph
|
||||
+ err_msg_dir_img
|
||||
+ """Warning: Directory Image, entry 0x3030 has unknown Exif (TIFF) type 12336; setting type size 1.
|
||||
Error: Directory Image, entry 0x3030 has invalid size 1414415696*1; skipping entry.
|
||||
"""
|
||||
+ 36 * err_msg_dir_img
|
||||
+ """Warning: JPEG format error, rc = 5
|
||||
"""]
|
||||
|
||||
retval = [0]
|
@@ -0,0 +1,426 @@
|
||||
# TL;DR
|
||||
|
||||
If you just want to write a simple test case, check out the file
|
||||
`writing_tests.md`.
|
||||
|
||||
# Introduction
|
||||
|
||||
This test suite is intended for system tests, i.e. for running a binary with
|
||||
certain parameters and comparing the output against an expected value. This is
|
||||
especially useful for a regression test suite, but can also be used for testing
|
||||
of new features where unit testing is not feasible, e.g. to test new command
|
||||
line parameters.
|
||||
|
||||
The test suite is configured via `INI` style files using Python's builtin
|
||||
[ConfigParser](https://docs.python.org/3/library/configparser.html)
|
||||
module. Such a configuration file looks roughly like this:
|
||||
``` ini
|
||||
[DEFAULT]
|
||||
some_var: some_val
|
||||
|
||||
[section 1]
|
||||
empty_var:
|
||||
multiline_var: this is a multiline string
|
||||
as long as the indentation
|
||||
is present
|
||||
# comments can be inserted
|
||||
# some_var is implicitly present in this section by the DEFAULT section
|
||||
|
||||
[section 2]
|
||||
# set some_var for this section to something else than the default
|
||||
some_var: some_other_val
|
||||
# values from other sections can be inserted
|
||||
vars can have whitespaces: ${some_var} ${section 1:multiline_var}
|
||||
multiline var: multiline variables can have
|
||||
|
||||
empty lines too
|
||||
```
|
||||
|
||||
For further details concerning the syntax, please consult the official
|
||||
documentation. The `ConfigParser` module is used with the following defaults:
|
||||
- Comments are started by `#` only
|
||||
- The separator between a variable and the value is `:`
|
||||
- Multiline comments can have empty lines
|
||||
- Extended Interpolation is used (this allows referring to other sections when
|
||||
inserting values using the `${section:variable}` syntax)
|
||||
|
||||
Please keep in mind that leading and trailing whitespace is **stripped** from
|
||||
strings when extracting variable values. So this:
|
||||
|
||||
``` ini
|
||||
some_var: some value with whitespaces before and after
|
||||
```
|
||||
is equivalent to this:
|
||||
``` ini
|
||||
some_var:some value with whitespaces before and after
|
||||
```
|
||||
|
||||
The test suite itself uses the builtin `unittest` module of Python to discover
|
||||
and run the individual test cases. The test cases themselves are implemented in
|
||||
Python source files, but the required Python knowledge is minimal.
|
||||
|
||||
## Test suite
|
||||
|
||||
The test suite is configured via one configuration file whose location
|
||||
automatically sets the root directory of the test suite. The `unittest` module
|
||||
then recursively searches all sub-directories with an `__init__.py` file for
|
||||
files of the form `test_*.py`, which it automatically interprets as test cases
|
||||
(more about these in the next section). Python will automatically interpret each
|
||||
directory as a module and use this to format the output, e.g. the test case
|
||||
`regression/crashes/test_bug_15.py` will be interpreted as the module
|
||||
`regression.crashes.test_bug_15`. Thus one can use the directory structure to
|
||||
group test cases.
|
||||
|
||||
The test suite's configuration file should have the following form:
|
||||
|
||||
``` ini
|
||||
[General]
|
||||
timeout: 0.1
|
||||
|
||||
[paths]
|
||||
binary: ../build/bin/binary
|
||||
important_file: ../conf/main.cfg
|
||||
|
||||
[variables]
|
||||
abort_error: ERROR
|
||||
abort_exit_value: 1
|
||||
```
|
||||
|
||||
The General section only contains the `timeout` parameter, which is actually
|
||||
optional (when left out 1.0 is assumed). The timeout sets the maximum time in
|
||||
seconds for each command that is run before it is aborted. This allows for test
|
||||
driven development with tests that cause infinite loops or similar hangs in the
|
||||
test suite.
|
||||
|
||||
The paths and variables sections define global variables for the system test
|
||||
suite, which every test case can read. Following the DRY principle, one can put
|
||||
common outputs of the tested binary in a variable, so that changing an error
|
||||
message does not result in an hours-long update of the test suite. Both sections
|
||||
are merged together before being passed on to the test cases, thus they must not
|
||||
contain variables with the same name (doing so results in an error).
|
||||
|
||||
While the values in the variables section are simply passed on to the test cases,
|
||||
the paths section is special as its contents are interpreted as relative paths
|
||||
(with respect to the test suite's root) and are expanded to absolute paths
|
||||
before being passed to the test cases. This can be used to inform each test case
|
||||
about the location of a built binary or a configuration file without having to
|
||||
rely on environment variables.
|
||||
|
||||
However, sometimes environment variables are very handy to implement variable
|
||||
paths or platform differences (like different build directories or file
|
||||
extensions). For this, the test suite supports the `ENV` and `ENV fallback`
|
||||
sections. In conjunction with the extended interpolation of the `ConfigParser`
|
||||
module, these can be quite useful. Consider the following example:
|
||||
|
||||
``` ini
|
||||
[General]
|
||||
timeout: 0.1
|
||||
|
||||
[ENV]
|
||||
variable_prefix: PREFIX
|
||||
file_extension: FILE_EXT
|
||||
|
||||
[ENV fallback]
|
||||
variable_prefix: ../build
|
||||
|
||||
[paths]
|
||||
binary: ${ENV:variable_prefix}/bin/binary${ENV:file_extension}
|
||||
important_file: ../conf/main.cfg
|
||||
|
||||
[variables]
|
||||
abort_error: ERROR
|
||||
abort_exit_value: 1
|
||||
```
|
||||
|
||||
The `ENV` section is, similarly to the `paths` section, special insofar as the
|
||||
variables are extracted from the environment with the given name. E.g. the
|
||||
variable `file_extension` would be set to the value of the environment variable
|
||||
`FILE_EXT`. If the environment variable is not defined, then the test suite will
|
||||
look in the `ENV fallback` section for a fallback. E.g. in the above example
|
||||
`variable_prefix` has the fallback or default value of `../build` which will be
|
||||
used if the environment variable `PREFIX` is not set. If no fallback is provided
|
||||
then an empty string is used instead, which would happen to `file_extension` if
|
||||
`FILE_EXT` were unset.
|
||||
|
||||
This can be combined with the extended interpolation of Python's `ConfigParser`,
|
||||
which allows including variables from arbitrary sections in other variables
|
||||
using the `${sect:var_name}` syntax. This would be expanded to the value of
|
||||
`var_name` from the section `sect`. The above example only utilizes this in the
|
||||
`paths` section, but it can also be used in the `variables` section, if that
|
||||
makes sense for the use case.
|
||||
|
||||
Returning to the example config file, the path `binary` would be inferred in the
|
||||
following steps:
|
||||
1. extract `PREFIX` & `FILE_EXT` from the environment, if they don't exist use
|
||||
the default values from `ENV fallback` or ""
|
||||
2. substitute the strings `${ENV:variable_prefix}` and `${ENV:file_extension}`
|
||||
3. expand the relative path to an absolute path
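
For illustration, this is roughly what that resolution looks like when written
out as plain Python (the suite root path is a placeholder here; in the actual
suite this logic lives in `system_tests.configure_suite()`):

``` python
import os

# step 1: look up the environment variables, falling back to the
# "ENV fallback" value (or "" if no fallback is given)
variable_prefix = os.getenv("PREFIX") or "../build"
file_extension = os.getenv("FILE_EXT") or ""

# step 2: substitute ${ENV:variable_prefix} and ${ENV:file_extension}
binary = variable_prefix + "/bin/binary" + file_extension

# step 3: expand the relative path to an absolute path, relative to the
# directory containing the suite's configuration file
suite_root = "/path/to/the/dir"  # placeholder for the real suite root
binary = os.path.abspath(os.path.join(suite_root, binary))
```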
|
||||
|
||||
Please note that while the `INI` file allows for variables with whitespaces or
|
||||
`-` in their names, such variables will cause errors as they are invalid
|
||||
variable names in Python.
|
||||
|
||||
|
||||
## Test cases
|
||||
|
||||
The test cases are defined in Python source files utilizing the unittest module,
|
||||
thus every file must also be a valid Python file. Each file defining a test case
|
||||
must start with `test_` and have the file extension `py`. To be discovered by
|
||||
the unittest module it must reside in a directory with a (empty) `__init__.py`
|
||||
file.
|
||||
|
||||
A test case should test one logical unit, e.g. test for regressions of a certain
|
||||
bug or check if a command line option works. Each test case can run multiple
|
||||
commands which results are compared to an expected standard output, standard
|
||||
error and return value. Should differences arise or should one of the commands
|
||||
take too long, then an error message with the exact differences is shown to the
|
||||
user.
|
||||
|
||||
An example test case file would look like this:
|
||||
|
||||
``` python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import system_tests
|
||||
|
||||
|
||||
class AnInformativeName(system_tests.Case):
|
||||
|
||||
filename = "invalid_input_file"
|
||||
commands = [
|
||||
"{binary} -c {import_file} -i {filename}"
|
||||
]
|
||||
retval = ["{abort_exit_value}"]
|
||||
stdout = ["Reading {filename}"]
|
||||
stderr = [
|
||||
"""{abort_error}
|
||||
error in {filename}
|
||||
"""
|
||||
]
|
||||
```
|
||||
|
||||
The first 6 lines are necessary boilerplate to pull in the necessary routines to
|
||||
run the actual tests (these are implemented in the module `system_tests` with
|
||||
the class `system_tests.Case` extending `unittest.TestCase`). When adding new
|
||||
tests one should choose a new class name that briefly summarizes the test. Note
|
||||
that the file name (without the extension) with the directory structure is
|
||||
interpreted as the module by Python and pre-pended to the class name when
|
||||
reporting about the tests. E.g. the file `regression/crashes/test_bug_15.py`
|
||||
with the class `OutOfBoundsRead` gets reported as
|
||||
`regression.crashes.test_bug_15.OutOfBoundsRead`, already including a brief
|
||||
summary of this test.
|
||||
|
||||
**Caution:** Always import `system_tests` using the aforementioned syntax and don't
|
||||
use `from system_tests import Case`. This will not work, as the `system_tests`
|
||||
module stores the suite's config internally which will not be available if you
|
||||
perform a `from system_tests import Case` (this causes Python to create a copy
|
||||
of the class `system_tests.Case` for your module, without reading the
|
||||
configuration file).
|
||||
|
||||
In the following lines the lists `commands`, `retval`, `stdout` and `stderr`
|
||||
should be defined. These are lists of strings and must all have the same number
|
||||
of elements.
|
||||
|
||||
The test suite at first takes all these strings and substitutes all values in
|
||||
curly braces with variables that are either defined alongside in the class (like
|
||||
`filename` in the above example) or with the values defined in the test suite's
|
||||
configuration file. Please note that defining a variable with the same name as a
|
||||
variable in the suite's configuration file will result in an error (otherwise
|
||||
one of the variables would take precedence leading to unexpected results). The
|
||||
substitution of values is performed using Python's string `format()` method, and
|
||||
more elaborate format strings can be used when necessary.
|
||||
|
||||
In the above example the command would thus expand to:
|
||||
``` shell
|
||||
/path/to/the/dir/build/bin/binary -c /path/to/the/dir/conf/main.cfg -i invalid_input_file
|
||||
```
|
||||
and similarly for `stdout` and `stderr`.
|
||||
|
||||
Once the substitution is performed, each command is run using Python's
|
||||
`subprocess` module, its output is compared to the values in `stdout` and
|
||||
`stderr` and its return value to `retval`. Please note that for portability
|
||||
reasons the subprocess module is run with `shell=False`, thus shell expansions
|
||||
or pipes will not work.
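
To make this concrete, the following sketch shows roughly how a single expanded
command is executed (the paths are placeholders; the real implementation
additionally enforces the configured timeout and runs the command from the test
file's directory):

``` python
import shlex
import subprocess

command = "/path/to/the/dir/build/bin/binary -c /path/to/the/dir/conf/main.cfg -i invalid_input_file"

# shell=False is the default: the command is split into an argument list
# instead of being handed to a shell, so pipes and globs are not available
proc = subprocess.Popen(
    shlex.split(command),
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE
)
got_stdout, got_stderr = proc.communicate()

# the captured output is decoded and then compared against stdout/stderr,
# the return code against retval
print(proc.returncode, got_stdout.decode('utf-8'), got_stderr.decode('utf-8'))
```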
|
||||
|
||||
As the test cases are implemented in Python, one can take full advantage of
|
||||
Python for the construction of the necessary lists. For example when 10 commands
|
||||
should be run and all return 0, one can write `retval = 10 * [0]` instead of
|
||||
writing 0 ten times. The same is of course possible for strings.
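
For instance (with made-up file names), a test that runs the same command once
per input file could build its lists like this:

``` python
files = ["a.exv", "b.exv", "c.exv"]

commands = ["{binary} " + fname for fname in files]
retval = [0] * len(files)   # every command is expected to succeed
stderr = [""] * len(files)  # and to print nothing to standard error
```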
|
||||
|
||||
There are however some peculiarities with multiline strings in Python. Normal
|
||||
strings start and end with a single `"` but multiline strings start with three
|
||||
`"`. Also, while the variable names must be indented, new lines in multiline
|
||||
strings must not be, or additional whitespace will be added. E.g.:
|
||||
|
||||
``` python
|
||||
stderr = [
|
||||
"""something
|
||||
else"""
|
||||
]
|
||||
```
|
||||
will actually result in the string:
|
||||
|
||||
```
|
||||
something
|
||||
else
|
||||
```
|
||||
and not:
|
||||
```
|
||||
something
|
||||
else
|
||||
```
|
||||
as the indentation might have suggested.
|
||||
|
||||
Also note that in this example the string will not be terminated with a newline
|
||||
character. To achieve that, put the closing `"""` on the following line.
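
For example, this variant does end with a newline character, because the
closing `"""` sits on its own line:

``` python
stderr = [
    """something
else
"""
]
```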
|
||||
|
||||
|
||||
## Advanced test cases
|
||||
|
||||
This section describes more advanced features that are probably not necessary
|
||||
the "standard" usage of the test suite.
|
||||
|
||||
|
||||
### Creating file copies
|
||||
|
||||
For tests that modify their input file it is useful to run these with a
|
||||
disposable copy of the input file and not with the original. For this purpose
|
||||
the test suite features a decorator which creates a copy of the supplied files
|
||||
and deletes the copies after the test has run.
|
||||
|
||||
Example:
|
||||
``` python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import system_tests
|
||||
|
||||
|
||||
@system_tests.CopyFiles("{filename}", "{some_path}/another_file.txt")
|
||||
class AnInformativeName(system_tests.Case):
|
||||
|
||||
filename = "invalid_input_file"
|
||||
commands = [
|
||||
"{binary} -c {import_file} -i {filename}"
|
||||
]
|
||||
retval = ["{abort_exit_value}"]
|
||||
stdout = ["Reading {filename}"]
|
||||
stderr = [
|
||||
"""{abort_error}
|
||||
error in {filename}
|
||||
"""
|
||||
]
|
||||
```
|
||||
|
||||
In this example, the test suite would automatically create a copy of the files
|
||||
`invalid_input_file` and `{some_path}/another_file.txt` (`some_path` would be of
|
||||
course expanded too) named `invalid_input_file_copy` and
|
||||
`{some_path}/another_file_copy.txt`. After the test has run, the copies are
|
||||
deleted. Please note that variable expansion in the filenames is possible.
|
||||
|
||||
|
||||
### Customizing the output check
|
||||
|
||||
Some tests do not require a "brute-force" comparison of the whole output of a
|
||||
program but only a very simple check (e.g. that a string is present). For these
|
||||
cases, one can customize how stdout and stderr are checked for errors.
|
||||
|
||||
The `system_tests.Case` class has two public functions for the check of stdout &
|
||||
stderr: `compare_stdout` & `compare_stderr`. They have the following interface:
|
||||
``` python
|
||||
compare_stdout(self, i, command, got_stdout, expected_stdout)
|
||||
compare_stderr(self, i, command, got_stderr, expected_stderr)
|
||||
```
|
||||
with the parameters:
|
||||
- i: index of the command in the `commands` list
|
||||
- command: a string of the actually invoked command
|
||||
- got_stdout/stderr: the obtained stdout, post-processed depending on the
|
||||
platform so that lines always end with `\n`
|
||||
- expected_stdout/stderr: the expected output extracted from
|
||||
`self.stdout`/`self.stderr`
|
||||
|
||||
These functions can be overridden in child classes to perform custom checks (or
|
||||
to omit them completely). Note, however, that it is not possible to
|
||||
customize how the return value is checked. This is intended, as the return value
|
||||
is often used by the OS to indicate segfaults and ignoring it (in combination
|
||||
with flawed checks of the output) could lead to crashes not being noticed.
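
As a sketch (the class name, command and expected string are made up), a child
class that only requires a substring to be present in the output could look
like this:

``` python
import system_tests


class VersionContainsRevision(system_tests.Case):

    commands = ["{binary} --version"]
    stdout = ["rev"]   # only a substring, not the full output
    stderr = [""]
    retval = [0]

    def compare_stdout(self, i, command, got_stdout, expected_stdout):
        # pass as long as the expected substring occurs somewhere in stdout
        self.assertIn(expected_stdout, got_stdout)
```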
|
||||
|
||||
|
||||
### Manually expanding variables in strings
|
||||
|
||||
In case completely custom checks have to be run but one still wants to access
|
||||
the variables from the test suite, the class `system_tests.Case` provides the
|
||||
function `expand_variables(self, string)`. It performs the previously described
|
||||
variable substitution using the test suite's configuration file.
|
||||
|
||||
Unfortunately, it has to run in a class member function. The `setUp()` function
|
||||
can be used for this, as it is run before each test. For example like this:
|
||||
``` python
|
||||
class SomeName(system_tests.Case):
|
||||
|
||||
def setUp(self):
|
||||
self.commands = [self.expand_variables("{some_var}/foo.txt")]
|
||||
self.stderr = [""]
|
||||
self.stdout = [self.expand_variables("{success_message}")]
|
||||
self.retval = [0]
|
||||
```
|
||||
|
||||
This example will work, as the test runner reads the data for `commands`,
|
||||
`stderr`, `stdout` and `retval` from the class instance. What however will not
|
||||
work is creating a new member in `setUp()` and trying to use it as a variable
|
||||
for expansion, like this:
|
||||
``` python
|
||||
class SomeName(system_tests.Case):
|
||||
|
||||
def setUp(self):
|
||||
self.new_var = "foo"
|
||||
self.another_string = self.expand_variables("{new_var}")
|
||||
```
|
||||
|
||||
This example fails in `self.expand_variables` because the expansion uses only
|
||||
static class members (which `new_var` is not). Also, if you modify a static
|
||||
class member in `setUp()` the changed version will **not** be used for variable
|
||||
expansion, as the variables are saved in a new dictionary **before** `setUp()`
|
||||
runs. Thus this:
|
||||
``` python
|
||||
class SomeName(system_tests.Case):
|
||||
|
||||
new_var = "foo"
|
||||
|
||||
def setUp(self):
|
||||
self.new_var = "bar"
|
||||
self.another_string = self.expand_variables("{new_var}")
|
||||
```
|
||||
|
||||
will result in `another_string` being "foo" and not "bar".
|
||||
|
||||
|
||||
### Possible pitfalls
|
||||
|
||||
- Do not provide a custom `setUpClass()` function for the test
|
||||
cases. `setUpClass()` is used by `system_tests.Case` to store the variables
|
||||
for expansion.
|
||||
|
||||
- Keep in mind that the variable expansion uses Python's `format()`
|
||||
function. This can make it more cumbersome to include formatted strings into
|
||||
variables like `commands` which will likely contain other variables from the
|
||||
test suite. E.g.: `commands = ["{binary} {:s}".format(f) for f in files]` will
|
||||
not work as `format()` will expect a value for binary. This can be worked
|
||||
around using either the old Python formatting via `%` or by formatting first
|
||||
and then concatenating the problematic parts.
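
As an illustration of that last point (file names are made up), both of these
workarounds leave the `{binary}` placeholder untouched for the test suite to
expand later:

``` python
files = ["a.jpg", "b.jpg"]

# old-style % formatting ignores the curly braces
commands = ["{binary} %s" % fname for fname in files]

# or format/concatenate only the part that needs it
commands = ["{binary} " + fname for fname in files]
```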
|
||||
|
||||
|
||||
## Running the test suite
|
||||
|
||||
The test suite is written for Python 3 but is in principle also compatible with
|
||||
Python 2, although it is not regularly tested, so its functionality is not
|
||||
guaranteed with Python 2.
|
||||
|
||||
To run the suite, navigate to the `tests/` subdirectory and execute:
|
||||
``` shell
|
||||
python3 runner.py
|
||||
```
|
||||
|
||||
The runner script also supports the optional argument `--config_file`, which
|
||||
allows providing a different test suite configuration file than the default
|
||||
`suite.conf`. It also forwards the verbosity setting via the `-v`/`--verbose`
|
||||
flags to Python's unittest module.
|
@@ -0,0 +1,35 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import unittest
|
||||
import sys
|
||||
|
||||
import system_tests
|
||||
|
||||
parser = argparse.ArgumentParser(description="The system test suite")
|
||||
|
||||
parser.add_argument(
|
||||
"--config_file",
|
||||
type=str,
|
||||
nargs=1,
|
||||
default=['suite.conf']
|
||||
)
|
||||
parser.add_argument(
|
||||
"--verbose", "-v",
|
||||
action='count',
|
||||
default=1
|
||||
)
|
||||
args = parser.parse_args()
|
||||
conf_file = args.config_file[0]
|
||||
discovery_root = os.path.dirname(conf_file)
|
||||
|
||||
system_tests.configure_suite(conf_file)
|
||||
|
||||
discovered_tests = unittest.TestLoader().discover(discovery_root)
|
||||
test_res = unittest.runner.TextTestRunner(verbosity=args.verbose)\
|
||||
.run(discovered_tests)
|
||||
|
||||
sys.exit(0 if len(test_res.failures) + len(test_res.errors) == 0 else 1)
|
@@ -0,0 +1,20 @@
|
||||
[General]
|
||||
timeout: 1
|
||||
|
||||
[ENV]
|
||||
exiv2_path: EXIV2_PATH
|
||||
binary_extension: EXIV2_EXT
|
||||
|
||||
[ENV fallback]
|
||||
exiv2_path: ../build/bin
|
||||
|
||||
[paths]
|
||||
exiv2: ${ENV:exiv2_path}/exiv2${ENV:binary_extension}
|
||||
exiv2json: ${ENV:exiv2_path}/exiv2json${ENV:binary_extension}
|
||||
data_path: ../test/data
|
||||
tiff-test: ${ENV:exiv2_path}/tiff-test${ENV:binary_extension}
|
||||
|
||||
[variables]
|
||||
error_58_message: corrupted image metadata
|
||||
error_57_message: invalid memory allocation request
|
||||
exiv2_exception_msg: Exiv2 exception in print action for file
|
@@ -0,0 +1,331 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import configparser
|
||||
import os
|
||||
import inspect
|
||||
import subprocess
|
||||
import threading
|
||||
import shlex
|
||||
import sys
|
||||
import shutil
|
||||
import unittest
|
||||
|
||||
|
||||
if sys.platform == 'win32':
|
||||
def _cmd_splitter(cmd):
|
||||
return cmd
|
||||
|
||||
def _process_output_post(output):
|
||||
return output.replace('\r\n', '\n')
|
||||
|
||||
else:
|
||||
def _cmd_splitter(cmd):
|
||||
return shlex.split(cmd)
|
||||
|
||||
def _process_output_post(output):
|
||||
return output
|
||||
|
||||
|
||||
def _disjoint_dict_merge(d1, d2):
|
||||
"""
|
||||
Merges two dictionaries with no common keys together and returns the result.
|
||||
|
||||
>>> d1 = {"a": 1}
|
||||
>>> d2 = {"b": 2, "c": 3}
|
||||
>>> _disjoint_dict_merge(d1, d2) == {"a": 1, "b": 2, "c": 3}
|
||||
True
|
||||
|
||||
Calling this function with dictionaries that share keys raises a ValueError:
|
||||
>>> _disjoint_dict_merge({"a": 1, "b": 6}, {"b": 2, "a": 3})
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
ValueError: Dictionaries have common keys.
|
||||
|
||||
"""
|
||||
inter = set(d1.keys()).intersection(set(d2.keys()))
|
||||
if len(inter) > 0:
|
||||
raise ValueError("Dictionaries have common keys.")
|
||||
res = d1.copy()
|
||||
res.update(d2)
|
||||
return res
|
||||
|
||||
|
||||
_parameters = {}
|
||||
|
||||
|
||||
def configure_suite(config_file):
|
||||
"""
|
||||
Populates a global datastructure with the parameters from the suite's
|
||||
configuration file.
|
||||
|
||||
This function performs the following steps:
|
||||
1. read in the file ``config_file`` via the ConfigParser module using
|
||||
extended interpolation
|
||||
2. check that the sections ``variables`` and ``paths`` are disjoint
|
||||
3. extract the environment variables given in the ``ENV`` section
|
||||
4. save all entries from the ``variables`` section in the global
|
||||
datastructure
|
||||
5. interpret all entries in the ``paths`` section as relative paths from the
|
||||
configuration file, expand them to absolute paths and save them in the
|
||||
global datastructure
|
||||
|
||||
For further information concerning the rationale behind this, please consult
|
||||
the documentation in ``doc.md``.
|
||||
"""
|
||||
|
||||
if not os.path.exists(config_file):
|
||||
raise ValueError(
|
||||
"Test suite config file {:s} does not exist"
|
||||
.format(os.path.abspath(config_file))
|
||||
)
|
||||
|
||||
config = configparser.ConfigParser(
|
||||
interpolation=configparser.ExtendedInterpolation(),
|
||||
delimiters=(':',),
|
||||
comment_prefixes=('#',)
|
||||
)
|
||||
config.read(config_file)
|
||||
|
||||
_parameters["suite_root"] = os.path.split(os.path.abspath(config_file))[0]
|
||||
_parameters["timeout"] = config.getfloat("General", "timeout", fallback=1.0)
|
||||
|
||||
if 'variables' in config and 'paths' in config:
|
||||
intersecting_keys = set(config["paths"].keys())\
|
||||
.intersection(set(config["variables"].keys()))
|
||||
if len(intersecting_keys) > 0:
|
||||
raise ValueError(
|
||||
"The sections 'paths' and 'variables' must not share keys, "
|
||||
"but they have the following common key{:s}: {:s}"
|
||||
.format(
|
||||
's' if len(intersecting_keys) > 1 else '',
|
||||
', '.join(k for k in intersecting_keys)
|
||||
)
|
||||
)
|
||||
|
||||
# extract variables from the environment
|
||||
for key in config['ENV']:
|
||||
if key in config['ENV fallback']:
|
||||
fallback = config['ENV fallback'][key]
|
||||
else:
|
||||
fallback = ""
|
||||
config['ENV'][key] = os.getenv(config['ENV'][key]) or fallback
|
||||
|
||||
if 'variables' in config:
|
||||
for key in config['variables']:
|
||||
_parameters[key] = config['variables'][key]
|
||||
|
||||
if 'paths' in config:
|
||||
for key in config['paths']:
|
||||
rel_path = config['paths'][key]
|
||||
abs_path = os.path.abspath(
|
||||
os.path.join(_parameters["suite_root"], rel_path)
|
||||
)
|
||||
if not os.path.exists(abs_path):
|
||||
raise ValueError(
|
||||
"Path replacement for {short}: {abspath} does not exist"
|
||||
" (was expanded from {rel})".format(
|
||||
short=key,
|
||||
abspath=abs_path,
|
||||
rel=rel_path)
|
||||
)
|
||||
_parameters[key] = abs_path
|
||||
|
||||
|
||||
def _setUp_factory(old_setUp, *files):
|
||||
"""
|
||||
Factory function that returns a setUp function suitable to replace the
|
||||
existing setUp of a unittest.TestCase. The returned setUp calls at first
|
||||
old_setUp(self) and then creates a copy of all files in *files with the
|
||||
name: fname.ext -> fname_copy.ext
|
||||
|
||||
All file names in *files are at first expanded using self.expand_variables()
|
||||
and the path to the copy is saved in self._file_copies
|
||||
"""
|
||||
def setUp(self):
|
||||
old_setUp(self)
|
||||
self._file_copies = []
|
||||
for f in files:
|
||||
expanded_fname = self.expand_variables(f)
|
||||
fname, ext = os.path.splitext(expanded_fname)
|
||||
new_name = fname + '_copy' + ext
|
||||
self._file_copies.append(
|
||||
shutil.copyfile(expanded_fname, new_name)
|
||||
)
|
||||
return setUp
|
||||
|
||||
|
||||
def _tearDown_factory(old_tearDown):
|
||||
"""
|
||||
Factory function that returns a new tearDown method to replace an existing
|
||||
tearDown method. It at first deletes all files in self._file_copies and then
|
||||
calls old_tearDown(self).
|
||||
This factory is intended to be used in conjunction with _setUp_factory
|
||||
"""
|
||||
def tearDown(self):
|
||||
for f in self._file_copies:
|
||||
os.remove(f)
|
||||
old_tearDown(self)
|
||||
return tearDown
|
||||
|
||||
|
||||
def CopyFiles(*files):
|
||||
"""
|
||||
Decorator for subclasses of system_test.Case that automatically creates a
|
||||
copy of the files specified as the parameters to the decorator.
|
||||
|
||||
Example:
|
||||
>>> @CopyFiles("{some_var}/file.txt", "{another_var}/other_file.png")
|
||||
class Foo(Case):
|
||||
pass
|
||||
|
||||
The decorator will inject a new setUp method that first calls the already
|
||||
defined setUp(), then expands all supplied file names using
|
||||
Case.expand_variables and then creates copies by appending '_copy' before
|
||||
the file extension. The paths to the copies are stored in self._file_copies.
|
||||
|
||||
The decorator also injects a new tearDown method that deletes all files in
|
||||
self._file_copies and then calls the original tearDown method.
|
||||
|
||||
This function will also complain if it is called without arguments or
|
||||
without parentheses, which is valid decorator syntax but is obviously a bug
|
||||
in this case.
|
||||
"""
|
||||
if len(files) == 0:
|
||||
raise ValueError("No files to copy supplied.")
|
||||
elif len(files) == 1:
|
||||
if isinstance(files[0], type):
|
||||
raise UserWarning(
|
||||
"Decorator used wrongly, must be called with filenames in paranthesis"
|
||||
)
|
||||
|
||||
def wrapper(cls):
|
||||
old_setUp = cls.setUp
|
||||
cls.setUp = _setUp_factory(old_setUp, *files)
|
||||
|
||||
old_tearDown = cls.tearDown
|
||||
cls.tearDown = _tearDown_factory(old_tearDown)
|
||||
|
||||
return cls
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
class Case(unittest.TestCase):
|
||||
"""
|
||||
System test case base class, provides the functionality to interpret static
|
||||
class members as system tests and runs them.
|
||||
|
||||
This class reads in the members commands, retval, stdout, stderr and runs
|
||||
the format function on each, where format is called with the kwargs being a
|
||||
merged dictionary of all variables that were extracted from the suite's
|
||||
configuration file and all static members of the current class.
|
||||
|
||||
The resulting commands are then run using the subprocess module and compared
|
||||
against the expected values that were provided in the static
|
||||
members. Furthermore a threading.Timer is used to abort the execution if a
|
||||
configured timeout is reached.
|
||||
|
||||
The class itself must be inherited from, otherwise it is not useful at all,
|
||||
as it does not provide any static members that could be used to run system
|
||||
tests. However, a class that inherits from this class needn't provide any
|
||||
member functions at all; the inherited test_run() function performs all
|
||||
required functionality in child classes.
|
||||
"""
|
||||
|
||||
""" maxDiff set so that arbitrarily large diffs will be shown """
|
||||
maxDiff = None
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
"""
|
||||
This function adds the variables variable_dict & work_dir to the class.
|
||||
|
||||
work_dir - set to the file where the current class is defined
|
||||
variable_dict - a merged dictionary of all static members of the current
|
||||
class and all variables extracted from the suite's
|
||||
configuration file
|
||||
"""
|
||||
cls.variable_dict = _disjoint_dict_merge(cls.__dict__, _parameters)
|
||||
cls.work_dir = os.path.dirname(inspect.getfile(cls))
|
||||
|
||||
def compare_stdout(self, i, command, got_stdout, expected_stdout):
|
||||
"""
|
||||
Function to compare whether the expected & obtained stdout match.
|
||||
|
||||
This function is automatically invoked by test_run with the following
|
||||
parameters:
|
||||
i - the index of the current command that is run in self.commands
|
||||
command - the command that was run
|
||||
got_stdout - the obtained stdout, post-processed depending on the
|
||||
platform so that lines always end with \n
|
||||
expected_stdout - the expected stdout extracted from self.stdout
|
||||
|
||||
The default implementation simply uses assertMultiLineEqual from
|
||||
unittest.TestCase. This function can be overridden in a child class to
|
||||
implement a custom check.
|
||||
"""
|
||||
self.assertMultiLineEqual(expected_stdout, got_stdout)
|
||||
|
||||
def compare_stderr(self, i, command, got_stderr, expected_stderr):
|
||||
"""
|
||||
Same as compare_stdout only for standard-error.
|
||||
"""
|
||||
self.assertMultiLineEqual(expected_stderr, got_stderr)
|
||||
|
||||
def expand_variables(self, string):
|
||||
"""
|
||||
Expands all variables in curly braces in the given string using the
|
||||
dictionary variable_dict.
|
||||
|
||||
The expansion itself is performed by the builtin string method format().
|
||||
A KeyError indicates that the supplied string contains a variable
|
||||
in curly braces that is missing from self.variable_dict
|
||||
"""
|
||||
return str(string).format(**self.variable_dict)
|
||||
|
||||
def test_run(self):
|
||||
"""
|
||||
Actual system test function which runs the provided commands,
|
||||
pre-processes all variables and post processes the output before passing
|
||||
it on to compare_stderr() & compare_stdout().
|
||||
"""
|
||||
|
||||
for i, command, retval, stdout, stderr in zip(range(len(self.commands)),
|
||||
self.commands,
|
||||
self.retval,
|
||||
self.stdout,
|
||||
self.stderr):
|
||||
command, retval, stdout, stderr = map(
|
||||
self.expand_variables, [command, retval, stdout, stderr]
|
||||
)
|
||||
retval = int(retval)
|
||||
timeout = {"flag": False}
|
||||
|
||||
proc = subprocess.Popen(
|
||||
_cmd_splitter(command),
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
cwd=self.work_dir
|
||||
)
|
||||
|
||||
def timeout_reached(timeout):
|
||||
timeout["flag"] = True
|
||||
proc.kill()
|
||||
|
||||
t = threading.Timer(
|
||||
_parameters["timeout"], timeout_reached, args=[timeout]
|
||||
)
|
||||
t.start()
|
||||
got_stdout, got_stderr = proc.communicate()
|
||||
t.cancel()
|
||||
|
||||
self.assertFalse(timeout["flag"] and "Timeout reached")
|
||||
self.compare_stdout(
|
||||
i, command,
|
||||
_process_output_post(got_stdout.decode('utf-8')), stdout
|
||||
)
|
||||
self.compare_stderr(
|
||||
i, command,
|
||||
_process_output_post(got_stderr.decode('utf-8')), stderr
|
||||
)
|
||||
self.assertEqual(retval, proc.returncode)
|
@@ -0,0 +1,154 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import system_tests
|
||||
|
||||
|
||||
class OutputTagExtract(system_tests.Case):
|
||||
"""
|
||||
Test whether exiv2 -pa $file and exiv2 -pS $file produce the same output.
|
||||
"""
|
||||
|
||||
def parse_pa(self, stdout):
|
||||
"""
|
||||
Parse the output of exiv2 -pa $file, which looks like this:
|
||||
|
||||
Exif.Image.NewSubfileType Long 1 Primary image
|
||||
|
||||
into a list of dictionaries with the keys:
|
||||
tag: last word of the first column (here NewSubfileType)
|
||||
type: lowercase second column
|
||||
len: third column
|
||||
val: fourth column
|
||||
|
||||
It is furthermore checked that the first column begins with 'Exif.Image'
|
||||
"""
|
||||
data = []
|
||||
|
||||
for line in stdout:
|
||||
tmp = line.split()
|
||||
|
||||
exif, image, tag = tmp[0].split('.')
|
||||
self.assertEqual(exif, "Exif")
|
||||
self.assertEqual(image, "Image")
|
||||
|
||||
data.append({
|
||||
"tag": tag,
|
||||
"type": tmp[1].lower(),
|
||||
"len": int(tmp[2]),
|
||||
"val": " ".join(tmp[3:])
|
||||
})
|
||||
|
||||
return data
|
||||
|
||||
def parse_pS(self, stdout):
|
||||
"""
|
||||
Parse the output of exiv2 -pS $file, which looks like this:
|
||||
|
||||
STRUCTURE OF TIFF FILE (II): $file
|
||||
address | tag | type | count | offset | value
|
||||
254 | 0x00fe NewSubfileType | LONG | 1 | | 0
|
||||
...
|
||||
END $file
|
||||
|
||||
into a list of dictionaries with the following keys:
|
||||
tag: the string after the hex number in the second column
|
||||
type: lowercase third column
|
||||
len: fourth column
|
||||
val: fifth column
|
||||
|
||||
The first two lines and the last line are ignored, as they contain
|
||||
explanatory output.
|
||||
"""
|
||||
data = []
|
||||
|
||||
for i, line in enumerate(stdout):
|
||||
if i < 2 or i == len(stdout) - 1:
|
||||
continue
|
||||
|
||||
tmp = line.split(" | ")
|
||||
data.append({
|
||||
"tag": tmp[1].split()[1],
|
||||
"type": tmp[2].replace(' ', '').lower(),
|
||||
"len": int(tmp[3].replace(' ', '')),
|
||||
"val": tmp[5]
|
||||
})
|
||||
|
||||
return data
|
||||
|
||||
def compare_pS_pa(self):
|
||||
"""
|
||||
Compares the output from self.parse_pa() and self.parse_pS() (saved in
|
||||
self.pa_data & self.pS_data respectively).
|
||||
All dictionaries in the lists are compared for equality for the keys
|
||||
tag, len and type but only some for val. This is due to differently
|
||||
processed output (exiv2 -pa produces more readable output,
|
||||
e.g. compression is written in words and not as a number as it is by
|
||||
exiv2 -pS)
|
||||
"""
|
||||
for pa_elem, pS_elem in zip(self.pa_data, self.pS_data):
|
||||
for key in ["tag", "type", "len"]:
|
||||
self.assertEqual(pa_elem[key], pS_elem[key])
|
||||
|
||||
if pa_elem["tag"] in [
|
||||
"ImageWidth", "ImageLength", "BitsPerSample",
|
||||
"DocumentName", "ImageDescription", "StripOffsets",
|
||||
"SamplesPerPixel", "StripByteCounts", "PlanarConfiguration"]:
|
||||
self.assertEqual(pa_elem["val"], pS_elem["val"])
|
||||
|
||||
def compare_stdout(self, i, command, got_stdout, expected_stdout):
|
||||
super().compare_stdout(i, command, got_stdout, expected_stdout)
|
||||
|
||||
if '-pa' in command:
|
||||
self.pa_data = self.parse_pa(got_stdout.splitlines())
|
||||
if '-pS' in command:
|
||||
self.pS_data = self.parse_pS(got_stdout.splitlines())
|
||||
|
||||
if i == 1:
|
||||
self.compare_pS_pa()
|
||||
|
||||
commands = [
|
||||
"{exiv2} %s {data_path}/mini9.tif" % (opt) for opt in ["-pa", "-pS"]
|
||||
]
|
||||
|
||||
stderr = [""] * 2
|
||||
retval = [0] * 2
|
||||
stdout = [
|
||||
"""Exif.Image.NewSubfileType Long 1 Primary image
|
||||
Exif.Image.ImageWidth Short 1 9
|
||||
Exif.Image.ImageLength Short 1 9
|
||||
Exif.Image.BitsPerSample Short 3 8 8 8
|
||||
Exif.Image.Compression Short 1 Uncompressed
|
||||
Exif.Image.PhotometricInterpretation Short 1 RGB
|
||||
Exif.Image.DocumentName Ascii 24 /home/ahuggel/mini9.tif
|
||||
Exif.Image.ImageDescription Ascii 18 Created with GIMP
|
||||
Exif.Image.StripOffsets Long 1 8
|
||||
Exif.Image.Orientation Short 1 top, left
|
||||
Exif.Image.SamplesPerPixel Short 1 3
|
||||
Exif.Image.RowsPerStrip Short 1 64
|
||||
Exif.Image.StripByteCounts Long 1 243
|
||||
Exif.Image.XResolution Rational 1 72
|
||||
Exif.Image.YResolution Rational 1 72
|
||||
Exif.Image.PlanarConfiguration Short 1 1
|
||||
Exif.Image.ResolutionUnit Short 1 inch
|
||||
""",
|
||||
"""STRUCTURE OF TIFF FILE (II): {data_path}/mini9.tif
|
||||
address | tag | type | count | offset | value
|
||||
254 | 0x00fe NewSubfileType | LONG | 1 | | 0
|
||||
266 | 0x0100 ImageWidth | SHORT | 1 | | 9
|
||||
278 | 0x0101 ImageLength | SHORT | 1 | | 9
|
||||
290 | 0x0102 BitsPerSample | SHORT | 3 | 462 | 8 8 8
|
||||
302 | 0x0103 Compression | SHORT | 1 | | 1
|
||||
314 | 0x0106 PhotometricInterpretation | SHORT | 1 | | 2
|
||||
326 | 0x010d DocumentName | ASCII | 24 | 468 | /home/ahuggel/mini9.tif
|
||||
338 | 0x010e ImageDescription | ASCII | 18 | 492 | Created with GIMP
|
||||
350 | 0x0111 StripOffsets | LONG | 1 | | 8
|
||||
362 | 0x0112 Orientation | SHORT | 1 | | 1
|
||||
374 | 0x0115 SamplesPerPixel | SHORT | 1 | | 3
|
||||
386 | 0x0116 RowsPerStrip | SHORT | 1 | | 64
|
||||
398 | 0x0117 StripByteCounts | LONG | 1 | | 243
|
||||
410 | 0x011a XResolution | RATIONAL | 1 | 510 | 1207959552/16777216
|
||||
422 | 0x011b YResolution | RATIONAL | 1 | 518 | 1207959552/16777216
|
||||
434 | 0x011c PlanarConfiguration | SHORT | 1 | | 1
|
||||
446 | 0x0128 ResolutionUnit | SHORT | 1 | | 2
|
||||
END {data_path}/mini9.tif
|
||||
"""]
|
@@ -0,0 +1,60 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import system_tests
|
||||
|
||||
|
||||
@system_tests.CopyFiles("{data_path}/mini9.tif")
|
||||
class TestTiffTestProg(system_tests.Case):
|
||||
|
||||
commands = ["{tiff-test} {data_path}/mini9_copy.tif"]
|
||||
|
||||
stdout = [
|
||||
"""Test 1: Writing empty Exif data without original binary data: ok.
|
||||
Test 2: Writing empty Exif data with original binary data: ok.
|
||||
Test 3: Wrote non-empty Exif data without original binary data:
|
||||
Exif.Image.ExifTag 0x8769 Long 1 26
|
||||
Exif.Photo.DateTimeOriginal 0x9003 Ascii 18 Yesterday at noon
|
||||
MIME type: image/tiff
|
||||
Image size: 9 x 9
|
||||
Before
|
||||
Exif.Image.NewSubfileType 0x00fe Long 1 0
|
||||
Exif.Image.ImageWidth 0x0100 Short 1 9
|
||||
Exif.Image.ImageLength 0x0101 Short 1 9
|
||||
Exif.Image.BitsPerSample 0x0102 Short 3 8 8 8
|
||||
Exif.Image.Compression 0x0103 Short 1 1
|
||||
Exif.Image.PhotometricInterpretation 0x0106 Short 1 2
|
||||
Exif.Image.DocumentName 0x010d Ascii 24 /home/ahuggel/mini9.tif
|
||||
Exif.Image.ImageDescription 0x010e Ascii 18 Created with GIMP
|
||||
Exif.Image.StripOffsets 0x0111 Long 1 8
|
||||
Exif.Image.Orientation 0x0112 Short 1 1
|
||||
Exif.Image.SamplesPerPixel 0x0115 Short 1 3
|
||||
Exif.Image.RowsPerStrip 0x0116 Short 1 64
|
||||
Exif.Image.StripByteCounts 0x0117 Long 1 243
|
||||
Exif.Image.XResolution 0x011a Rational 1 1207959552/16777216
|
||||
Exif.Image.YResolution 0x011b Rational 1 1207959552/16777216
|
||||
Exif.Image.PlanarConfiguration 0x011c Short 1 1
|
||||
Exif.Image.ResolutionUnit 0x0128 Short 1 2
|
||||
======
|
||||
After
|
||||
Exif.Image.NewSubfileType 0x00fe Long 1 0
|
||||
Exif.Image.ImageWidth 0x0100 Short 1 9
|
||||
Exif.Image.ImageLength 0x0101 Short 1 9
|
||||
Exif.Image.BitsPerSample 0x0102 Short 3 8 8 8
|
||||
Exif.Image.Compression 0x0103 Short 1 1
|
||||
Exif.Image.PhotometricInterpretation 0x0106 Short 1 2
|
||||
Exif.Image.DocumentName 0x010d Ascii 24 /home/ahuggel/mini9.tif
|
||||
Exif.Image.ImageDescription 0x010e Ascii 18 Created with GIMP
|
||||
Exif.Image.StripOffsets 0x0111 Long 1 8
|
||||
Exif.Image.Orientation 0x0112 Short 1 1
|
||||
Exif.Image.SamplesPerPixel 0x0115 Short 1 3
|
||||
Exif.Image.RowsPerStrip 0x0116 Short 1 64
|
||||
Exif.Image.StripByteCounts 0x0117 Long 1 243
|
||||
Exif.Image.XResolution 0x011a Rational 1 1207959552/16777216
|
||||
Exif.Image.YResolution 0x011b Rational 1 1207959552/16777216
|
||||
Exif.Image.PlanarConfiguration 0x011c Short 1 1
|
||||
Exif.Image.ResolutionUnit 0x0128 Short 1 2
|
||||
Exif.Photo.DateTimeOriginal 0x9003 Ascii 18 Yesterday at noon
|
||||
"""
|
||||
]
|
||||
stderr = [""]
|
||||
retval = [0]
|
@@ -0,0 +1,42 @@
|
||||
## Writing new tests
|
||||
|
||||
The test suite is intended to run a binary and compare its standard output,
|
||||
standard error and return value against provided values. This is implemented
|
||||
using Python's `unittest` module and thus all test files are Python files.
|
||||
|
||||
The simplest test has the following structure:
|
||||
``` python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import system_tests
|
||||
|
||||
|
||||
class GoodTestName(system_tests.Case):
|
||||
|
||||
filename = "{data_path}/test_file"
|
||||
commands = ["{exiv2} " + filename, "{exiv2} " + filename + '_2']
|
||||
stdout = [""] * 2
|
||||
stderr = ["""{exiv2_exception_msg} """ + filename + """:
|
||||
{error_58_message}
|
||||
"""] * 2
|
||||
retval = [1] * 2
|
||||
```
|
||||
|
||||
The test suite will run the commands provided in `commands` and compare their
|
||||
output to the values in `stdout` and `stderr`; it will also compare the return values against `retval`.
|
||||
|
||||
The strings in curly braces are variables that are either defined in this test's class
|
||||
or taken from the suite's configuration file (see `doc.md` for a complete
|
||||
explanation).
|
||||
|
||||
When creating new tests, follow roughly these steps:
|
||||
|
||||
1. Choose an appropriate subdirectory where the test belongs. If none fits
|
||||
create a new one and put an empty `__init__.py` file there.
|
||||
|
||||
2. Create a new file with a name matching `test_*.py`. Copy the class definition
|
||||
from the above example and choose an appropriate class name.
|
||||
|
||||
3. Run the test suite via `python3 runner.py` and ensure that your test case is
|
||||
actually run! Either run the suite with the `-v` option, which lists all
|
||||
test cases that were run, or temporarily introduce an error and check that it is reported.
|