#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2018 IBM.
# Author: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>
# Author: Nageswara R Sastry <rnsastry@linux.vnet.ibm.com>

import os
import re
import tempfile

from avocado import Test
from avocado.utils import process, distro, dmesg
from avocado.utils.software_manager.manager import SoftwareManager


class PerfBasic(Test):
"""
Performance analysis tools for Linux
:avocado: tags=privileged,perf
execute basic perf testcases:
- help
- version
- 'list' -> List all symbolic event types
- 'record' -> Run a command and record its profile into perf.data
- 'report' -> Read perf.data (created by perf record) and display
the profile
execute perf commands:
- 'kallsyms' -> Searches running kernel for symbols
- 'annotate' -> Read perf.data (created by perf record) and display
annotated code
- 'evlist' -> List the event names in a perf.data file
- 'script' -> Read perf.data (created by perf record) and display
trace output
- 'stat' -> Run a command and gather performance counter statistics
- 'bench' -> General framework for benchmark suites
"""

    fail_cmd = list()

    def run_cmd(self, cmd, verbose=True):
        # Run the command via sudo; a non-zero exit status fails the
        # test, and dmesg is then scanned for kernel error signatures.
        self.log.info("executing ============== %s =================", cmd)
        if process.system(cmd, verbose=verbose, ignore_status=True,
                          sudo=True, shell=True):
            self.fail("perf: failed to execute command %s" % cmd)
        dmesg.collect_errors_dmesg(['WARNING: CPU:', 'Oops', 'segfault',
                                    'soft lockup', 'SIGSEGV', 'core', 'dumped',
                                    'Segmentation fault'])

    def setUp(self):
        """
        Install the packages that provide perf on the detected distro.
        """
        smg = SoftwareManager()
        dist = distro.detect()
        if dist.name in ['Ubuntu']:
            # Ubuntu ships perf in a kernel-version-specific
            # linux-tools package plus linux-tools-common.
            linux_tools = "linux-tools-" + os.uname()[2]
            pkgs = [linux_tools, 'linux-tools-common']
        elif dist.name in ['debian']:
            pkgs = ['linux-perf']
        elif dist.name in ['centos', 'fedora', 'rhel', 'SuSE']:
            pkgs = ['perf']
        else:
            self.cancel("perf is not supported on %s" % dist.name)
        self.temp_file = tempfile.NamedTemporaryFile().name
        dmesg.clear_dmesg()
        for pkg in pkgs:
            if not smg.check_installed(pkg) and not smg.install(pkg):
                self.cancel(
                    "Package %s is missing/could not be installed" % pkg)

    def test_perf_help(self):
        self.run_cmd("perf --help", False)

    def test_perf_version(self):
        # 'perf --version' should print a version string containing digits.
        output = process.run("perf --version", sudo=True, shell=True)
        if not re.search(r'\d', output.stdout.decode()):
            self.fail("perf: failed to execute command perf --version")

    def test_perf_version_test(self):
        # An exit status of -11 means the process was killed by
        # SIGSEGV (signal 11), i.e. perf segfaulted on the bogus flag.
        output = process.run("perf --version -test", ignore_status=True,
                             sudo=True, shell=True)
        if output.exit_status == -11:
            self.fail("perf --version -test command segfaulted")

    def test_perf_list(self):
        self.run_cmd("perf list", False)

    def test_perf_record(self):
        # Record a 5-second system-wide profile, then check that a
        # non-empty sample file was produced and 'perf report' can read it.
        self.run_cmd("perf record -o %s -a sleep 5" % self.temp_file)
        if os.path.exists(self.temp_file):
            if not os.stat(self.temp_file).st_size:
                self.fail("%s sample not captured" % self.temp_file)
            else:
                self.run_cmd("perf report --stdio -i %s" % self.temp_file)

    def test_perf_cmd_kallsyms(self):
        # Look up a symbol that is always present in a running kernel.
        self.run_cmd("perf kallsyms __schedule")

    def test_perf_cmd_annotate(self):
        self.run_cmd("perf record -o %s -a sleep 1" % self.temp_file)
        self.run_cmd("perf annotate --stdio -i %s" % self.temp_file)

    def test_perf_cmd_evlist(self):
        self.run_cmd("perf record -o %s -a sleep 1" % self.temp_file)
        self.run_cmd("perf evlist -v -i %s" % self.temp_file)

    def test_perf_cmd_script(self):
        self.run_cmd("perf record -o %s -a sleep 1" % self.temp_file)
        self.run_cmd("perf script -i %s" % self.temp_file)

    def test_perf_stat(self):
        self.run_cmd("perf stat -a sleep 5")

    def test_perf_bench(self):
        self.run_cmd("perf bench sched all")

    def tearDown(self):
        if os.path.isfile(self.temp_file):
            process.run('rm -f %s' % self.temp_file)
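
# A typical invocation, assuming the avocado framework is installed and
# the test runs with root privileges (matching the 'privileged' tag):
#   avocado run perf_basic.py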