Optimize even further Stats.accumulate as it's a hotspot: don't create a new Stats...
[iotop.git] / iotop / data.py
blob 3a64531adf899712b8c1e0715dd6dc5aa2e232ea
import errno
import glob
import os
import pprint
import pwd
import socket
import stat
import struct
import sys
import time

from iotop import ioprio, vmstat
from netlink import Connection, NETLINK_GENERIC, U32Attr, NLM_F_REQUEST
from genetlink import Controller, GeNlMessage

# Check for requirements:
# o Python >= 2.5 for AF_NETLINK sockets
# o Linux >= 2.6.20 with I/O accounting

try:
    socket.NETLINK_ROUTE
    python25 = True
except AttributeError:
    python25 = False

ioaccounting = os.path.exists('/proc/self/io')

if not python25 or not ioaccounting:
    def boolean2string(boolean):
        return boolean and 'Found' or 'Not found'
    print 'Could not run iotop as some of the requirements are not met:'
    print '- Python >= 2.5 for AF_NETLINK support:', boolean2string(python25)
    print '- Linux >= 2.6.20 with I/O accounting support ' \
          '(CONFIG_TASKSTATS, CONFIG_TASK_DELAY_ACCT, ' \
          'CONFIG_TASK_IO_ACCOUNTING):', \
          boolean2string(ioaccounting)
    sys.exit(1)

class DumpableObject(object):
    """Base class for all objects that allows easy introspection when printed"""
    def __repr__(self):
        return '%s: %s>' % (str(type(self))[:-1], pprint.pformat(self.__dict__))

# Interesting fields in a taskstats output
class Stats(DumpableObject):
    members_offsets = [
        ('blkio_delay_total', 40),
        ('swapin_delay_total', 56),
        ('read_bytes', 248),
        ('write_bytes', 256),
        ('cancelled_write_bytes', 264)
    ]

    has_blkio_delay_total = False

    def __init__(self, task_stats_buffer):
        for name, offset in Stats.members_offsets:
            data = task_stats_buffer[offset:offset + 8]
            setattr(self, name, struct.unpack('Q', data)[0])

        # This is a heuristic to detect if CONFIG_TASK_DELAY_ACCT is enabled in
        # the kernel.
        if not Stats.has_blkio_delay_total:
            Stats.has_blkio_delay_total = self.blkio_delay_total != 0

    def accumulate(self, other_stats, destination, operator=sum):
        """Update destination from operator(self, other_stats)"""
        for name, offset in Stats.members_offsets:
            self_value = self.__dict__[name]
            other_value = other_stats.__dict__[name]
            destination.__dict__[name] = operator((self_value, other_value))

    def delta(self, other_stats, destination):
        """Update destination with self - other_stats"""
        def subtract((me, other)):
            return me - other
        return self.accumulate(other_stats, destination, operator=subtract)

    def is_all_zero(self):
        for name, offset in Stats.members_offsets:
            if getattr(self, name) != 0:
                return False
        return True

    @staticmethod
    def build_all_zero():
        stats = Stats.__new__(Stats)
        for name, offset in Stats.members_offsets:
            setattr(stats, name, 0)
        return stats
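
# A minimal usage sketch (illustrative, not part of this module) of the
# in-place pattern the commit message refers to: accumulate() and delta()
# write into an existing destination Stats instead of allocating a new one
# on every call, and they run once per thread per refresh, hence the hotspot.
#
#     total = Stats.build_all_zero()
#     total.accumulate(thread_stats, total)    # total += thread_stats
#     current.delta(previous, delta_dest)      # delta_dest = current - previous
#
# (thread_stats, current, previous and delta_dest are hypothetical Stats
# instances used only for illustration.)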

# Netlink usage for taskstats

TASKSTATS_CMD_GET = 1
TASKSTATS_CMD_ATTR_PID = 1

class TaskStatsNetlink(object):
    # Keep in sync with format_stats() and pinfo.did_some_io()

    def __init__(self, options):
        self.options = options
        self.connection = Connection(NETLINK_GENERIC)
        controller = Controller(self.connection)
        self.family_id = controller.get_family_id('TASKSTATS')

    def get_single_task_stats(self, pid):
        request = GeNlMessage(self.family_id, cmd=TASKSTATS_CMD_GET,
                              attrs=[U32Attr(TASKSTATS_CMD_ATTR_PID, pid)],
                              flags=NLM_F_REQUEST)
        request.send(self.connection)
        try:
            reply = self.connection.recv()
        except OSError, e:
            if e.errno == errno.ESRCH:
                # OSError: Netlink error: No such process (3)
                return
            raise
        if len(reply.payload) < 292:
            # Short reply
            return
        reply_data = reply.payload[20:]

        reply_length, reply_type = struct.unpack('HH', reply.payload[4:8])
        reply_version = struct.unpack('H', reply.payload[20:22])[0]
        assert reply_length >= 288
        assert reply_type == TASKSTATS_CMD_ATTR_PID + 3
        assert reply_version >= 4
        return Stats(reply_data)
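
# Illustrative sketch (not in this blob): fetching one thread's counters goes
# through the TASKSTATS generic netlink family, roughly as iotop's own refresh
# loop does:
#
#     taskstats = TaskStatsNetlink(options)   # options is stored, not used here
#     stats = taskstats.get_single_task_stats(os.getpid())
#     if stats is not None:
#         print stats.read_bytes, stats.write_bytes
#
# A None return means the target exited (ESRCH) or the reply was too short.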

# PIDs manipulations

def find_uids(options):
    """Build options.uids from options.users by resolving usernames to UIDs"""
    options.uids = []
    error = False
    for u in options.users or []:
        try:
            uid = int(u)
        except ValueError:
            try:
                passwd = pwd.getpwnam(u)
            except KeyError:
                print >> sys.stderr, 'Unknown user:', u
                error = True
            else:
                uid = passwd.pw_uid
        if not error:
            options.uids.append(uid)
    if error:
        sys.exit(1)

def safe_utf8_decode(s):
    try:
        return s.decode('utf-8')
    except UnicodeDecodeError:
        return s.encode('string_escape')

class ThreadInfo(DumpableObject):
    """Stats for a single thread"""
    def __init__(self, tid):
        self.tid = tid
        self.mark = True
        self.stats_total = None
        self.stats_delta = Stats.__new__(Stats)

    def get_ioprio(self):
        return ioprio.get(self.tid)

    def update_stats(self, stats):
        if not self.stats_total:
            self.stats_total = stats
        stats.delta(self.stats_total, self.stats_delta)
        self.stats_total = stats

class ProcessInfo(DumpableObject):
    """Stats for a single process (a single line in the output): if
    options.processes is set, it is a collection of threads, otherwise a single
    thread."""
    def __init__(self, pid):
        self.pid = pid
        self.uid = None
        self.user = None
        self.threads = {}  # {tid: ThreadInfo}
        self.stats_delta = Stats.build_all_zero()
        self.stats_accum = Stats.build_all_zero()
        self.stats_accum_timestamp = time.time()

    def is_monitored(self, options):
        if (options.pids and not options.processes and
            self.pid not in options.pids):
            # We only monitor some threads, not this one
            return False

        if options.uids and self.get_uid() not in options.uids:
            # We only monitor some users, not this one
            return False

        return True

    def get_uid(self):
        if self.uid:
            return self.uid
        # uid in (None, 0) means either we don't know the UID yet or the process
        # runs as root so it can change its UID. In both cases it means we have
        # to find out its current UID.
        try:
            uid = os.stat('/proc/%d' % self.pid)[stat.ST_UID]
        except OSError:
            # The process disappeared
            uid = None
        if uid != self.uid:
            # Maybe the process called setuid()
            self.user = None
        return uid

    def get_user(self):
        uid = self.get_uid()
        if uid is not None and not self.user:
            try:
                self.user = safe_utf8_decode(pwd.getpwuid(uid).pw_name)
            except KeyError:
                self.user = str(uid)
        return self.user or '{none}'

    def get_proc_status_name(self):
        try:
            proc_status = open('/proc/%d/status' % self.pid)
        except IOError:
            return '{no such process}'
        first_line = proc_status.readline()
        prefix = 'Name:\t'
        if first_line.startswith(prefix):
            name = first_line[6:].strip()
        else:
            name = ''
        if name:
            name = '[%s]' % name
        else:
            name = '{no name}'
        return name

    def get_cmdline(self):
        # A process may exec, so we must always reread its cmdline
        try:
            proc_cmdline = open('/proc/%d/cmdline' % self.pid)
            cmdline = proc_cmdline.read(4096)
        except IOError:
            return '{no such process}'
        if not cmdline:
            # Probably a kernel thread, get its name from /proc/PID/status
            return self.get_proc_status_name()
        parts = cmdline.split('\0')
        if parts[0].startswith('/'):
            first_command_char = parts[0].rfind('/') + 1
            parts[0] = parts[0][first_command_char:]
        cmdline = ' '.join(parts).strip()
        return safe_utf8_decode(cmdline)

    def did_some_io(self, accumulated):
        if accumulated:
            return not self.stats_accum.is_all_zero()
        return not all(t.stats_delta.is_all_zero() for
                       t in self.threads.itervalues())

    def get_ioprio(self):
        priorities = set(t.get_ioprio() for t in self.threads.itervalues())
        if len(priorities) == 1:
            return priorities.pop()
        return '?'

    def ioprio_sort_key(self):
        return ioprio.sort_key(self.get_ioprio())

    def get_thread(self, tid):
        thread = self.threads.get(tid, None)
        if not thread:
            thread = ThreadInfo(tid)
            self.threads[tid] = thread
        return thread

    def update_stats(self):
        stats_delta = Stats.build_all_zero()
        for tid, thread in self.threads.items():
            if thread.mark:
                del self.threads[tid]
            else:
                stats_delta.accumulate(thread.stats_delta, stats_delta)

        nr_threads = len(self.threads)
        if not nr_threads:
            return False

        stats_delta.blkio_delay_total /= nr_threads
        stats_delta.swapin_delay_total /= nr_threads

        self.stats_delta = stats_delta
        self.stats_accum.accumulate(self.stats_delta, self.stats_accum)

        return True

class ProcessList(DumpableObject):
    def __init__(self, taskstats_connection, options):
        # {pid: ProcessInfo}
        self.processes = {}
        self.taskstats_connection = taskstats_connection
        self.options = options
        self.timestamp = time.time()
        self.vmstat = vmstat.VmStat()

        # A first time as we are interested in the delta
        self.update_process_counts()

    def get_process(self, pid):
        """Either get the specified PID from self.processes or build a new
        ProcessInfo if we see this PID for the first time"""
        process = self.processes.get(pid, None)
        if not process:
            process = ProcessInfo(pid)
            self.processes[pid] = process

        if process.is_monitored(self.options):
            return process

    def list_tgids(self):
        if self.options.pids:
            for pid in self.options.pids:
                yield pid

        pattern = '/proc/[0-9]*'
        if not self.options.processes:
            pattern += '/task/*'

        for path in glob.iglob(pattern):
            yield int(os.path.basename(path))

    def list_tids(self, tgid):
        if not self.options.processes:
            return [tgid]

        try:
            tids = map(int, os.listdir('/proc/%d/task' % tgid))
        except OSError:
            return []

        if self.options.pids:
            tids = list(set(self.options.pids).intersection(set(tids)))

        return tids

    def update_process_counts(self):
        new_timestamp = time.time()
        self.duration = new_timestamp - self.timestamp
        self.timestamp = new_timestamp

        for tgid in self.list_tgids():
            process = self.get_process(tgid)
            if not process:
                continue
            for tid in self.list_tids(tgid):
                thread = process.get_thread(tid)
                stats = self.taskstats_connection.get_single_task_stats(tid)
                if stats:
                    thread.update_stats(stats)
                    thread.mark = False

        return self.vmstat.delta()

    def refresh_processes(self):
        for process in self.processes.itervalues():
            for thread in process.threads.itervalues():
                thread.mark = True

        total_read_and_write = self.update_process_counts()

        for pid, process in self.processes.items():
            if not process.update_stats():
                del self.processes[pid]

        return total_read_and_write

    def clear(self):
        self.processes = {}
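
# Minimal driver sketch, not part of the original blob: one plausible way to
# exercise the classes above without iotop's UI code.  The "options" value is
# a hypothetical stand-in exposing the attributes this module reads (pids,
# processes, uids, users); reading taskstats usually requires root.
if __name__ == '__main__':
    import optparse
    options = optparse.Values({'pids': [], 'processes': True,
                               'uids': [], 'users': []})
    taskstats = TaskStatsNetlink(options)
    process_list = ProcessList(taskstats, options)
    # ProcessList() already took a first sample; wait so the next refresh
    # yields meaningful per-interval deltas.
    time.sleep(1)
    process_list.refresh_processes()
    for pid, process in process_list.processes.items():
        if process.did_some_io(accumulated=False):
            print pid, process.get_user(), process.get_cmdline()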