-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdgroup_analyze-MPR.py
More file actions
1451 lines (1392 loc) · 59.7 KB
/
dgroup_analyze-MPR.py
File metadata and controls
1451 lines (1392 loc) · 59.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
from __future__ import division
# dgroup_analyze-MPR.py
#
import argparse
import os
import sys
import time
import logging
import pprint
import re
import ConfigParser
from operator import itemgetter
from collections import Counter
import csv
import subprocess
from datetime import datetime, date
import socket
from collections import defaultdict
from multiprocessing import Process
from itertools import repeat
import networkx as nx
# TODO from itertools import combinations
from mypytools import mean, stdev, variance
from garbology import EdgeInfoReader, ObjectInfoReader, DeathGroupsReader, ContextCountReader, get_index
pp = pprint.PrettyPrinter( indent = 4 )
def setup_logger( targetdir = ".",
                  filename = "dgroup_analyze-MPR.log",
                  logger_name = 'dgroup_analyze-MPR',
                  debugflag = 0 ):
    """Create and return the main file logger.

    Log records go to `targetdir`/`filename` (truncated on every run).
    When `debugflag` is truthy both the logger and its handler run at
    DEBUG level; otherwise both run at ERROR level.
    """
    level = logging.DEBUG if debugflag else logging.ERROR
    logger = logging.getLogger( logger_name )
    logger.setLevel( level )
    handler = logging.FileHandler( os.path.join( targetdir, filename ), 'w' )
    handler.setLevel( level )
    handler.setFormatter( logging.Formatter( '[%(funcName)s] : %(message)s' ) )
    logger.addHandler( handler )
    return logger
def debug_lifetimes( G, cycle, bmark, logger ):
    # Log (critically) every node in `cycle` whose recorded lifetime is
    # non-positive. Uses the networkx 1.x `G.node` attribute-dict API.
    # `bmark` is only referenced by the commented-out print below.
    global pp
    for x in cycle:
        if G.node[x]["lifetime"] <= 0:
            n = G.node[x]
            # print "XXX %s: [ %d - %s ] lifetime: %d" % \
            #     (bmark, x, n["type"], n["lifetime"])
            logger.critical( "XXX: [ %d - %s ] lifetime: %d" %
                             (x, n["type"], n["lifetime"]) )
def get_trace_fp( tracefile = None,
                  logger = None ):
    # Open a trace file for reading, transparently decompressing
    # .bz2/.gz files by piping through an external bzcat/zcat process.
    # Exits the program with code 21 if the path is neither a regular
    # file nor a symlink. Returns a readable file object.
    if not os.path.isfile( tracefile ) and not os.path.islink( tracefile ):
        # File does not exist
        logger.error( "Unable to open %s" % str(tracefile) )
        print "Unable to open %s" % str(tracefile)
        exit(21)
    bz2re = re.compile( "(.*)\.bz2$", re.IGNORECASE )
    gzre = re.compile( "(.*)\.gz$", re.IGNORECASE )
    bz2match = bz2re.search( tracefile )
    gzmatch = gzre.search( tracefile )
    if bz2match:
        # bzip2 file: read the decompressed stream from a bzcat child process.
        fp = subprocess.Popen( [ "bzcat", tracefile ],
                               stdout = subprocess.PIPE,
                               stderr = subprocess.PIPE ).stdout
    elif gzmatch:
        # gzip file: same approach via zcat.
        fp = subprocess.Popen( [ "zcat", tracefile ],
                               stdout = subprocess.PIPE,
                               stderr = subprocess.PIPE ).stdout
    else:
        # Plain (uncompressed) trace file.
        fp = open( tracefile, "r")
    return fp
#
# Main processing
#
def get_types( G, cycle ):
    """Return the "type" attribute of every node in `cycle` (networkx 1.x API)."""
    types = []
    for node_id in cycle:
        types.append( G.node[node_id]["type"] )
    return types
def get_types_and_save_index( G, cycle ):
    """Return (node_id, type) pairs for every node in `cycle`."""
    pairs = []
    for node_id in cycle:
        pairs.append( (node_id, G.node[node_id]["type"]) )
    return pairs
def get_lifetimes( G, cycle ):
    """Return the "lifetime" attribute of every node in `cycle`."""
    lifetimes = []
    for node_id in cycle:
        lifetimes.append( G.node[node_id]["lifetime"] )
    return lifetimes
def get_lifetimes_debug( G, cycle ):
    # Tolerant variant of get_lifetimes(): nodes missing from the graph
    # or lacking a "lifetime" attribute are reported to stdout and
    # skipped instead of raising. Returns the lifetimes that were found.
    result = []
    for x in cycle:
        try:
            mynode = G.node[x]
        except:
            print "Unable to get node[ %d ]" % x
            continue
        try:
            mylifetime = mynode["lifetime"]
        except:
            print "Unable to get lifetime for node[ %d ] -> %s" % (x, str(mynode))
            continue
        result.append(mylifetime)
    return result
def get_sizes( G, cycle ):
    """Return the "size" attribute of every node in `cycle`."""
    sizes = []
    for node_id in cycle:
        sizes.append( G.node[node_id]["size"] )
    return sizes
def get_summary( summary_path ):
    """Parse the SUMMARY INFO section of a summary file.

    The section is delimited by two lines starting with
    "---------------[ SUMMARY INFO". Each row inside is "key,count";
    returns a dict mapping key -> int(count). Asserts that the closing
    delimiter was actually seen.
    """
    in_section = False
    reached_end = False
    rows = []
    with open(summary_path) as fp:
        for line in fp:
            line = line.rstrip()
            if line.startswith("---------------[ SUMMARY INFO"):
                # First marker opens the section; second one closes it.
                in_section = not in_section
                if not in_section:
                    reached_end = True
                    break
                continue
            if in_section:
                row = line.split(",")
                row[1] = int(row[1])
                rows.append(row)
    assert(reached_end)
    return dict(rows)
def get_edge_info( edgeinfo_path ):
    """Parse the EDGE INFO section of an edge-info file.

    The section is delimited by two lines starting with
    "---------------[ EDGE INFO". Each row is "src,tgt,t1,t2,...";
    returns { (src, tgt) : (t1, t2, ...) } with all fields as ints.
    """
    in_section = False
    reached_end = False
    edge_info = {}
    with open(edgeinfo_path) as fp:
        # Map edge (src,tgt) -> (alloctime, deathtime)
        for line in fp:
            line = line.rstrip()
            if line.startswith("---------------[ EDGE INFO"):
                in_section = not in_section
                if not in_section:
                    reached_end = True
                    break
                continue
            if in_section:
                fields = line.split(",")
                src, tgt = int(fields[0]), int(fields[1])
                edge_info[ (src, tgt) ] = tuple( int(f) for f in fields[2:] )
    assert(reached_end)
    return edge_info
def get_cycles( tgtpath ):
    """Parse the CYCLES section of a cycles file.

    The section is delimited by two lines starting with
    "---------------[ CYCLES". Each row is a comma-separated list of
    node ids (tolerating a trailing comma); returns a list of int lists.
    """
    global pp
    in_section = False
    reached_end = False
    cycles = []
    with open(tgtpath) as fp:
        for line in fp:
            line = line.rstrip()
            if line.startswith("---------------[ CYCLES"):
                in_section = not in_section
                if not in_section:
                    reached_end = True
                    break
                continue
            if in_section:
                cycles.append( [ int(f) for f in line.rstrip(",").split(",") ] )
    assert(reached_end)
    return cycles
g_regex = re.compile( "([^\$]+)\$(.*)" )
def is_inner_class( mytype ):
    """True when `mytype` looks like a JVM inner-class name (Outer$Inner).

    Requires at least one non-'$' character before the '$'.
    """
    global g_regex
    return g_regex.match(mytype) is not None
def row_to_string( row ):
    """Serialize `row` to a single CSV line terminated by "\n".

    BUG FIX: the original referenced StringIO.StringIO but the StringIO
    module was never imported anywhere in this file, so every call raised
    NameError. Import it locally, falling back to io.StringIO so the
    function also works under Python 3.
    """
    try:
        from cStringIO import StringIO      # Python 2 (fast C version)
    except ImportError:
        from io import StringIO             # Python 3
    strout = StringIO()
    csvwriter = csv.writer(strout)
    csvwriter.writerow( row )
    result = strout.getvalue()
    strout.close()
    # csv emits "\r\n" line endings by default; normalize to "\n".
    return result.replace("\r", "")
def render_histogram( histfile = None,
                      title = None ):
    # Shell out to an Rscript histogram renderer, producing
    # "<histfile>.png" (800x800). The Rscript binary and the histogram.R
    # script paths are hard coded (see TODOs). The renderer's stdout and
    # stderr are echoed to our stdout.
    outpng = histfile + ".png"
    cmd = [ "/data/rveroy/bin/Rscript",
            "/data/rveroy/pulsrc/etanalyzer/Rgraph/histogram.R", # TODO Hard coded for now.
            # Put into config. TODO TODO TODO
            histfile, outpng,
            "800", "800",
            title, ]
    print "Running histogram.R on %s -> %s" % (histfile, outpng)
    print "[ %s ]" % cmd
    renderproc = subprocess.Popen( cmd,
                                   stdout = subprocess.PIPE,
                                   stdin = subprocess.PIPE,
                                   stderr = subprocess.PIPE )
    # communicate() returns the (stdout, stderr) pair; print both.
    result = renderproc.communicate()
    print "--------------------------------------------------------------------------------"
    for x in result:
        print x
    print "--------------------------------------------------------------------------------"
def write_histogram( results = None,
                     tgtbase = None,
                     title = None ):
    # Write three CSV histogram inputs (<tgtbase>-totals.csv, -cycles.csv,
    # -types.csv) from `results` (benchmark -> info dict with keys
    # "totals", "largest_cycle", "largest_cycle_types_set"), then render
    # each CSV to a PNG via render_histogram().
    # TODO Use a list and a for loop to refactor.
    tgtpath_totals = tgtbase + "-totals.csv"
    tgtpath_cycles = tgtbase + "-cycles.csv"
    tgtpath_types = tgtbase + "-types.csv"
    with open(tgtpath_totals, 'wb') as fp_totals, \
        open(tgtpath_cycles, 'wb') as fp_cycles, \
        open(tgtpath_types, 'wb') as fp_types:
        # TODO REFACTOR into a loop
        # TODO 2015-1103 - RLV TODO
        header = [ "benchmark", "total" ]
        csvw = {}
        csvw["totals"] = csv.writer( fp_totals,
                                     quotechar = '"',
                                     quoting = csv.QUOTE_NONNUMERIC )
        csvw["largest_cycle"] = csv.writer( fp_cycles,
                                            quotechar = '"',
                                            quoting = csv.QUOTE_NONNUMERIC )
        csvw["largest_cycle_types_set"] = csv.writer( fp_types,
                                                      quotechar = '"',
                                                      quoting = csv.QUOTE_NONNUMERIC )
        keys = csvw.keys()
        dframe = {}
        for key in keys:
            csvw[key].writerow( header )
            dframe[key] = []
        for benchmark, infodict in results.iteritems():
            for key in keys:
                assert( key in infodict )
                for item in infodict[key]:
                    # "totals" rows carry the value itself; the cycle and
                    # type rows carry the collection's length.
                    row = [ benchmark, item ] if key == "totals" \
                        else [ benchmark, len(item) ]
                    dframe[key].append(row)
        # Sort each frame by benchmark name before writing.
        sorted_result = [ (key, sorted( dframe[key], key = itemgetter(0) )) for key in keys ]
        for key, result in sorted_result:
            for csvrow in result:
                csvw[key].writerow( csvrow )
    # TODO TODO TODO TODO
    # TODO TODO TODO: SPAWN OFF THREAD
    # TODO TODO TODO TODO
    render_histogram( histfile = tgtpath_totals,
                      title = title )
    render_histogram( histfile = tgtpath_cycles,
                      title = title )
    render_histogram( histfile = tgtpath_types,
                      title = title )
def output_R( benchmark = None ):
    """Placeholder for R output generation — not implemented.

    Would need `benchmark` once implemented.
    TODO: Decide whether this function is needed at all.
    """
    pass
def create_work_directory( work_dir,
                           thishost = "",
                           today = "",
                           timenow = "",
                           logger = None, interactive = False ):
    """Create (if needed) and chdir into work_dir/thishost/today/timenow.

    For each path component in turn: exit(11) if a same-named regular
    file is in the way; create the directory if missing; if it already
    exists, warn (and when `interactive`, wait for ENTER). Returns the
    absolute path of the final working directory.
    """
    os.chdir( work_dir )
    # Check to see host name directory ----------------------------------------
    if os.path.isfile(thishost):
        print("%s is a file, NOT a directory." % thishost)
        exit(11)
    if not os.path.isdir( thishost ):
        os.mkdir( thishost )
        print("WARNING: %s directory does not exist. Creating it" % thishost)
    else:
        # BUG FIX: this warn/prompt branch previously ran unconditionally
        # (the `else:` was missing), warning "directory exists" even when
        # the directory had just been created. Now mirrors the `today`
        # and `timenow` sections below.
        print("WARNING: %s directory exists." % thishost)
        logger.warning( "WARNING: %s directory exists." % thishost )
        if interactive:
            raw_input("Press ENTER to continue:")
        else:
            print("....continuing!!!")
    os.chdir( thishost )
    # Check today directory ---------------------------------------------------
    if os.path.isfile(today):
        print("Can not create %s as directory." % today)
        exit(11)
    if not os.path.isdir( today ):
        os.mkdir( today )
    else:
        print("WARNING: %s directory exists." % today)
        logger.warning( "WARNING: %s directory exists." % today )
        if interactive:
            raw_input("Press ENTER to continue:")
        else:
            print("....continuing!!!")
    os.chdir( today )
    # Check timenow directory -------------------------------------------------
    if os.path.isfile(timenow):
        print("Can not create %s as directory." % timenow)
        exit(11)
    if not os.path.isdir( timenow ):
        os.mkdir( timenow )
    else:
        print("WARNING: %s directory exists." % timenow)
        logger.warning( "WARNING: %s directory exists." % timenow )
        if interactive:
            raw_input("Press ENTER to continue:")
        else:
            print("....continuing!!!")
    os.chdir( timenow )
    return str(os.getcwd())
def save_interesting_small_cycles( largest_scc, summary ):
    """Record an SCC in summary["by_size"] when it is "interesting".

    Interesting is defined as size 1 through 4 inclusive; anything
    empty or larger is ignored.
    """
    size = len(largest_scc)
    if 0 < size <= 4:
        summary["by_size"][size].append( largest_scc )
def get_last_edge_from_result( edge_list ):
    """Return the edge record with the greatest death time (field index 4).

    BUG FIX: the original initialized `latest` from the first record but
    never updated it inside the loop, so every record later than the
    FIRST one replaced the running answer — the function returned the
    last such record, not the maximum. `max` with an itemgetter key also
    preserves the original tie-breaking (earliest of equals wins).
    """
    return max( edge_list, key = itemgetter(4) )
def print_summary( summary ):
    # Pretty-print the per-benchmark summary dict to stdout.
    # "by_size" entries are skipped entirely; "types" and "sbysize" are
    # rendered with pprint; every other value is printed as an integer.
    global pp
    for bmark, fdict in summary.iteritems():
        print "[%s]:" % bmark
        for key, value in fdict.iteritems():
            if key == "by_size":
                continue
            if key == "types" or key == "sbysize":
                print " [%s]: %s" % (key, pp.pformat(value))
            else:
                print " [%s]: %d" % (key, value)
def skip_benchmark(bmark):
    """True for benchmarks that should be skipped.

    tradebeans/tradesoap are permanently ignored; beyond that the
    current run is restricted to xalan only, so every other benchmark
    is skipped as well.
    """
    if bmark in ("tradebeans", "tradesoap"):   # Permanent ignore
        return True
    # Current run: everything except xalan is skipped.
    return bmark != "xalan"
def with_primitive_array( typeset = set([]) ):
    """Given a set of exactly two JVM type names, return the object-array
    type ("[L...") when its partner is a primitive array; else None.

    ROBUSTNESS FIX: the original unconditionally indexed typelist[1] and
    raised IndexError for typesets of any size other than 2; now such
    inputs return None.
    NOTE(review): the primitive-array pattern only covers [C, [I and [J,
    not the other primitive codes (Z, B, D, F, S) — intentional? Compare
    is_primitive_array() below. TODO confirm.
    """
    typelist = list(typeset)
    if len(typelist) != 2:
        return None
    arre = re.compile("^\[[CIJ]")
    first, second = typelist
    if (first.find("[L") == 0) and (arre.search(second) is not None):
        return first
    if (second.find("[L") == 0) and (arre.search(first) is not None):
        return second
    return None
def fixed_known_key_objects( group = [],
                             objinfo = None,
                             logger = None ):
    """Match `group` against known fixed key-object patterns.

    Two patterns are recognized:
      1. { [C, String }         -> key is the String object;
      2. a 2-type set pairing an object array with a primitive array
         (see with_primitive_array) -> key is the object array.
    Returns { "key": <type name>, "obj": <object id> } on a match,
    or None when the group matches no known pattern.
    """
    # Get types
    typeset = set( [ objinfo.get_type(x) for x in group ] )
    logger.debug( "Checking group set: %s" % str(list(typeset)) )
    # Check against known groups
    if typeset == set( [ "[C", "Ljava/lang/String;" ] ):
        logger.debug( "Matches [C - String" )
        obj = None
        for x in group:
            if objinfo.get_type(x) == "Ljava/lang/String;":
                obj = x
                break
        assert(obj != None)
        return { "key" : "Ljava/lang/String;",
                 "obj" : obj }
    elif ( (len(typeset) == 2) and
           (with_primitive_array(typeset) != None) ):
        mytype = with_primitive_array(typeset)
        obj = None
        for x in group:
            if objinfo.get_type(x) == mytype:
                obj = x
                break
        assert(obj != None)
        # BUG FIX: this branch previously returned "Ljava/lang/String;"
        # as the key (copy-paste from the branch above) even though the
        # key object found is of the matched array type `mytype`.
        return { "key" : mytype,
                 "obj" : obj }
    return None
def find_dupes( dgroups = None):
    """Find objects that belong to more than one death group.

    `dgroups` maps group number -> list of object ids (iterated via
    its iteritems() method). Returns { objId : [groupnum, ...] }
    restricted to objects appearing in two or more groups.
    """
    membership = defaultdict(list)
    progress = 0   # retained from the original (debug progress counter)
    for groupnum, members in dgroups.iteritems():
        progress += 1
        for objid in members:
            membership[objid].append( groupnum )
    # Keep only the objects that showed up in multiple groups.
    return dict( [ (objid, grlist)
                   for objid, grlist in membership.items()
                   if len(grlist) > 1 ] )
def get_last_edge_record( group, edgeinfo, objectinfo ):
    """Find the group member whose last incoming edge died most recently.

    Objects with no last-edge record must have died by stack (asserted).
    Returns { "dtime": latest death time, "lastsources": its sources,
    "target": the object id } — zeros/empty when no member has a record.
    """
    best_dtime = 0
    best_sources = []
    best_target = 0
    for obj in group:
        record = edgeinfo.get_last_edge_record(obj)
        if record is None:
            # No last edge recorded for this object.
            # TODO Root object?
            assert( objectinfo.died_by_stack(obj) )
        elif record["dtime"] > best_dtime:
            best_dtime = record["dtime"]
            best_sources = record["lastsources"]
            best_target = obj
    return { "dtime" : best_dtime,
             "lastsources" : best_sources,
             "target" : best_target }
def is_array( mytype ):
    """A JVM type descriptor denotes an array iff it begins with '['."""
    return mytype.startswith("[")
def debug_primitive_key( group = None,
                         keytype = None,
                         keyId = None,
                         objinfo = None,
                         logger = None ):
    # Debug dump for a group whose key object is a primitive array:
    # prints each member's id, type, died-by cause and death time.
    # NOTE(review): `keyId` and `logger` are currently unused.
    print " -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-"
    print " >>> PRIMITIVE KEY: %s" % keytype
    for x in group:
        tmp = objinfo.get_record(x)
        print " %d [ %s ][ by %s ] - %d" % \
            (x, objinfo.get_type(x), tmp[ get_index("DIEDBY") ], tmp[ get_index("DTIME") ])
    print " -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-"
def debug_multiple_keys( group = None,
                         key_objects = None,
                         objinfo = None,
                         logger = None ):
    # Debug dump for a group that has several marked key objects: prints
    # each key's id/type/died-by/death time, then the distinct types of
    # the remaining (non-key) members. `logger` is currently unused.
    print " -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-"
    print " >>> MULTIPLE KEY DEBUG:"
    print " [KEYS]"
    for x in key_objects:
        tmp = objinfo.get_record(x)
        print " %d [ %s ][ by %s ] - %d" % \
            (x, objinfo.get_type(x), tmp[ get_index("DIEDBY") ], tmp[ get_index("DTIME") ])
    print " Others:", str( list(set([ objinfo.get_type(x) for x in group if x not in key_objects ])) )
    print " -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-"
def is_primitive_type( mytype = None ):
    """True for a single-character JVM primitive type descriptor.

    Codes: Z=boolean, B=byte, C=char, D=double, F=float, I=int,
    J=long, S=short.
    """
    return mytype in ( "Z", "B", "C", "D", "F", "I", "J", "S" )
def is_primitive_array( mytype = None ):
    """True for one or more '[' followed by a primitive code ("[I", "[[C", ...)."""
    # Not an array at all -> cannot be a primitive array.
    if not is_array(mytype):
        return False
    rest = mytype[1:]
    # Either exactly one dimension of a primitive, or recurse on
    # the remaining dimensions.
    return is_primitive_type(rest) or is_primitive_array(rest)
# Return true if all objects in group are:
# - primitive
# - primitive arrays
def all_primitive_types( group = [],
                         objinfo = None ):
    """True when every object in `group` has a primitive (or primitive
    array) type according to `objinfo`. Vacuously True for an empty group."""
    types = ( objinfo.get_type(obj) for obj in group )
    return all( is_primitive_type(t) or is_primitive_array(t)
                for t in types )
def get_earliest_alloctime_object( group = [],
                                   objinfo = None ):
    """Return the object in `group` with the smallest allocation time.

    Returns None for an empty group.

    BUG FIX: the original loop read `tmp = group[0]` instead of the loop
    variable, so it compared the first element against itself forever
    and always returned group[0] regardless of allocation times.
    """
    if len(group) == 0:
        return None
    cur = group[0]
    currec = objinfo.get_record(cur)
    cur_atime = currec[ get_index("ATIME") ]
    for obj in group[1:]:
        tmprec = objinfo.get_record(obj)
        tmp_atime = tmprec[ get_index("ATIME") ]
        if tmp_atime < cur_atime:
            cur = obj
            currec = tmprec
            cur_atime = tmp_atime
    return cur
def get_most_likely_keytype( objlist = [],
                             tydict = {} ):
    # Pick the most likely key object among candidates (objlist) with
    # types given by tydict. Blacklisted types (String) are excluded
    # first; if that filters everything out, fall back to primitives;
    # as a last resort return the first candidate. Returns an object id.
    blacklist = set([ "Ljava/lang/String;", ])
    assert(len(objlist)) > 0
    # NOTE(review): the `or is_primitive_type(...)` clause is redundant —
    # single-char primitive codes are never in the blacklist.
    newlist = list( set( [ x for x in objlist
                           if (tydict[x] not in blacklist or is_primitive_type(tydict[x])) ] ) )
    if len(newlist) > 1:
        # Let's return the oldest object
        # NOTE(review): assumes smaller object ids were allocated
        # earlier — TODO confirm.
        newlist = sorted(newlist)
    if len(newlist) > 0:
        return newlist[0]
    # Something in the blacklist is the key object
    newlist = list( set( [ x for x in objlist
                           if is_primitive_type(tydict[x]) ] ) )
    if len(newlist) > 1:
        # Let's return the oldest object
        newlist = sorted(newlist)
    if len(newlist) > 0:
        return newlist[0]
    # What does this mean?
    print "--------------------------------------------------------------------------------"
    print "DEBUG: blacklist doesn't work for -->"
    print str(tydict)
    print "--------------------------------------------------------------------------------"
    return objlist[0]
def update_keytype_dict( ktdict = {},
                         objId = -1,
                         objType = "",
                         group = [],
                         objinfo = None,
                         contextinfo = None,
                         group_types = frozenset([]),
                         max_age = 0,
                         dumpall = False,
                         filterbytes = 8388608,
                         writer = None,
                         logger = None ):
    # Record one death group, keyed by object `objId` of type `objType`,
    # in the per-keytype statistics dict `ktdict`, optionally dump a CSV
    # row (when `dumpall`), and bump the death-context counts in
    # `contextinfo`.
    #
    # Returns: DIEDBYSTACK if the key object died at program end
    # (skipped); otherwise True on success, False if the context-count
    # update failed.
    # NOTE(review): mutable default args (ktdict={}, group=[]) are safe
    # only because callers always pass these explicitly.
    assert( objId >= 0 )
    if dumpall:
        assert( writer != None )
    if objinfo.died_at_end(objId):
        # We ignore immortal objects that died at the END.
        # TODO TODO TODO: Shouldn't this be something like DIED_AT_END?
        return DIEDBYSTACK
        # TODO TODO TODO
    grouplen = len(group)
    # The "true" key object is the earliest-allocated member of the group.
    early_obj = get_earliest_alloctime_object( group = group, objinfo = objinfo )
    true_key_flag = (early_obj == objId)
    if objType in ktdict:
        ktdict[objType]["max"] = max( grouplen, ktdict[objType]["max"] )
        ktdict[objType]["min"] = min( grouplen, ktdict[objType]["min"] )
        ktdict[objType]["grouplen_list"].append( grouplen )
        ktdict[objType]["total"] += 1
        ktdict[objType]["group_types"].update( [ group_types ] )
        ktdict[objType]["allocsites"].update( [ objinfo.get_allocsite(objId) ] )
        if true_key_flag:
            ktdict[objType]["true_key_count"] += 1
    else:
        # First time we see this key type: initialize its stats record.
        ktdict[objType] = { "total" : 1,
                            "max" : grouplen,
                            "min" : grouplen,
                            "grouplen_list" : [ grouplen ],
                            "is_array": is_array(objType),
                            "group_types" : Counter( [ group_types ] ),
                            "true_key_count" : 1 if true_key_flag else 0,
                            "allocsites" : Counter( [ objinfo.get_allocsite(objId) ] ), }
    # Also update the context information
    cpair = objinfo.get_death_context( objId )
    if dumpall:
        # Header is [ "type", "time", "context1", "context2",
        #             "number of objects", "cause", "subcause",
        #             "allocsite", "age_methup", "age_alloc" ]
        # NOTE: `and` binds tighter than `or` here — the row is written
        # when filterbytes == 0, or when it is positive and max_age fits.
        if ( (filterbytes == 0) or
             (filterbytes > 0) and (max_age <= filterbytes) ):
            rec = objinfo.get_record(objId)
            dcause = objinfo.get_death_cause_using_record(rec)
            # Subcause depends on the death cause: stack attr for "S",
            # last heap update for "H"/"G", "NONE" otherwise.
            subcause = ( objinfo.get_stack_died_by_attr_using_record(rec) if dcause == "S"
                         else ( objinfo.get_last_heap_update_using_record(rec)
                                if (dcause == "H" or dcause == "G") else "NONE" ) )
            age_methup = objinfo.get_age_using_record(rec)
            age_alloc = objinfo.get_age_using_record_ALLOC(rec)
            writer.writerow( [ objType,
                               objinfo.get_death_time_using_record(rec),
                               cpair[0], # death context pair first element
                               cpair[1], # death context pair second element
                               # TODO: which direction does the context pair go?
                               grouplen,
                               dcause,
                               subcause,
                               # TODO: Use a context pair for allocation site too?
                               objinfo.get_allocsite_using_record(rec),
                               age_methup,
                               age_alloc, ] )
        else:
            logger.debug( "Object [%s](%d) IGNORED." % (objType, objId) )
    result = contextinfo.inc_key_count( context_pair = cpair,
                                        objType = objType )
    if result == None:
        return False
    elif not result:
        sys.stdout.write( "ERR: objId[%d] -> %s" % (objId, str(cpair)) )
        return False
    return True
# Status codes returned by the key-object analyses (get_key_object_types
# and helpers). Value 6 is unused.
ONEKEY = 1           # exactly one key object identified
MULTKEY = 2          # multiple candidate key objects
NOKEY = 3            # no key object in the group
ONEKEY_LASTEDGE = 4  # single key found via the last-edge heuristic
ONEKEY_KNOWNOBJ = 5  # single key matched a known-object pattern
DIEDBYSTACK = 7      # group died by stack; members treated individually
DIEDATEND = 8        # immortal group (died at program end); ignored
NOTFOUND = 9         # group missing or no usable key found
def get_key_object_types( gnum = None,
                          ktdict = {},
                          dgroups = None,
                          edgeinfo = None,
                          objinfo = None,
                          contextinfo = None,
                          contextresult = {},
                          dumpall = False,
                          filterbytes = 8388608,
                          logger = None,
                          writer = None,
                          dgraph = None,
                          ignore_died_at_end = True ):
    # Determine the key object (and its type) for death group `gnum`,
    # then update the keytype statistics (ktdict), context counts
    # (contextinfo / contextresult) and the death-group graph (dgraph).
    # Returns one of the module status codes: ONEKEY, DIEDBYSTACK,
    # DIEDATEND or NOTFOUND.
    # NOTE(review): `ignore_died_at_end` and `edgeinfo` are accepted but
    # never used in this function.
    if gnum in dgroups.group2list:
        group = dgroups.group2list[gnum]
    else:
        return NOTFOUND # TODO What should return be? None?
    # Check if any of the group is a key object
    key_objects = []
    max_age = 0
    for xtmp in group:
        if objinfo.is_key_object(xtmp):
            key_objects.append(xtmp)
            max_age = max( max_age, objinfo.get_age_ALLOC(xtmp) )
    found_key = False
    used_last_edge = False
    print " - grouplen: %d" % len(group)
    # Check to see if the key object is a primitive type array
    total_cc = 0
    err_cc = 0
    # ======================================================================
    # Case 1: the whole group is primitives / primitive arrays. Each
    # member becomes its own singleton "group" in the statistics.
    if all_primitive_types( group, objinfo ):
        print " - all primitive types."
        # All are a group unto themselves
        for obj in group:
            # NOTE: Ignoring objects that died at end (ie IMMORTAL)
            # TODO: We may not want this behavior for the graph...
            # TODO: ...or even for everything? -RLV
            if objinfo.died_at_end(obj):
                continue # TODO Is this what we want in the analysis?
            tmptype = objinfo.get_type(obj)
            result = update_keytype_dict( ktdict = ktdict,
                                          objId = obj,
                                          objType = tmptype,
                                          group = [ obj ],
                                          objinfo = objinfo,
                                          contextinfo = contextinfo,
                                          group_types = frozenset([]),
                                          max_age = max_age,
                                          filterbytes = filterbytes,
                                          dumpall = dumpall,
                                          writer = writer,
                                          logger = logger )
            keyrec = objinfo.get_record(obj)
            atime = objinfo.get_alloc_time_using_record( keyrec )
            dtime = objinfo.get_death_time_using_record( keyrec )
            # NOTE(review): "ditme" looks like a typo for "dtime"; the
            # same misspelled key is used in the add_node call near the
            # end of this function, so downstream consumers currently
            # see the misspelled node attribute. Fix both together.
            dgraph.add_node( gnum, { "size" : 1,
                                     "keytype" : str(tmptype),
                                     "atime" : atime,
                                     "ditme" : dtime, } )
            # Get dtime and atime using objId
            total_cc += 1
            err_cc = ((err_cc + 1) if (not result) else err_cc)
        # print "BY STACK - all primitive" # TODO Make into a logging statement
        contextresult["total"] = contextresult["total"] + total_cc
        contextresult["error"] = contextresult["error"] + err_cc
        return DIEDBYSTACK # TODO This does not seem right.
    # ======================================================================
    # Case 2: exactly one marked key object.
    if len(key_objects) == 1:
        # Found key objects
        found_key = True
        tgt = key_objects[0]
        result = ONEKEY
        print " - single key object: %s" % objinfo.get_type(tgt)
        if objinfo.died_at_end(tgt):
            return DIEDATEND
        mytype = objinfo.get_type(tgt)
        if mytype == "[C":
            # A char[] key paired with exactly one String is treated as
            # a String key (the usual String + backing array pattern).
            tylist = [ objinfo.get_type(x) for x in group ]
            if ( ("Ljava/lang/String;" in tylist) and
                 (len(group) == 2) ):
                mytype = "Ljava/lang/String;"
            else:
                print "Y:", str(tylist)
                return NOTFOUND
    else:
        # ======================================================================
        # Case 3: zero or multiple marked key objects — fall back to the
        # latest-dying non-primitive member as the key.
        if len(key_objects) > 1:
            # Multiple keys?
            tgt = None
            # DEBUG ONLY
            # debug_multiple_keys( group = group,
            #                      key_objects = key_objects,
            #                      objinfo = objinfo,
            #                      logger = logger )
            # END DEBUG
            if objinfo.died_at_end(group[0]):
                return DIEDATEND
            result = MULTKEY
        # DEBUG ONLY
        # else:
        #     print " - DEBUG: NO marked key objects."
        # Scan forward to the first non-primitive member.
        done = False
        curindex = 0
        while not done and (curindex < len(group)):
            cur = group[curindex]
            currec = objinfo.get_record(cur)
            cur_dtime = currec[ get_index("DTIME") ]
            curtype = objinfo.get_type(cur)
            if is_primitive_array(curtype) or is_primitive_type(curtype):
                curindex += 1
                continue
            else:
                done = True
                break
        if not done or curindex >= len(group):
            return NOTFOUND
        # Collect the set of non-primitive members sharing the latest
        # death time.
        curset = set([ cur ])
        curtydict = { cur : curtype }
        for tmp in group[curindex:]:
            tmprec = objinfo.get_record(tmp)
            tmp_dtime = tmprec[ get_index("DTIME") ]
            tmptype = objinfo.get_type(tmp)
            if is_primitive_array(tmptype) or is_primitive_type(tmptype):
                continue
            elif tmp_dtime > cur_dtime:
                curset = set([ tmp ])
                currec = tmprec
                cur_dtime = tmp_dtime
                curtydict = { tmp : tmptype }
            elif tmp_dtime == cur_dtime:
                if tmp not in curset:
                    curset.add( tmp )
                    curtydict[tmp] = tmptype
        if len(curset) > 1:
            # Several members tie on death time; use the heuristic in
            # get_most_likely_keytype to pick one.
            print "--------------------------------------------------------------------------------"
            print curset
            print "--------------------------------------------------------------------------------"
            for obj, mytype in curtydict.iteritems():
                print "%d -> %s" % (obj, mytype)
            likely = get_most_likely_keytype( objlist = list(curset),
                                              tydict = curtydict )
            curset = set([ likely ])
        assert(len(curset) > 0 )
        tgt = list(curset)[0]
        mytype = curtydict[tgt]
        # TODO Make into a logging statement
        print " - key among multiples - %d [ %s ][ dtime: %d ]" % (cur, curtype, cur_dtime)
        if mytype == "[C":
            # Same char[]/String special case as in the single-key branch.
            tylist = [ objinfo.get_type(x) for x in group ]
            if ( ("Ljava/lang/String;" in tylist) and
                 (len(group) == 2) ):
                mytype = "Ljava/lang/String;"
            else:
                print "Y:", str(tylist)
                return NOTFOUND
    # ----------------------------------------------------------------------------------
    # Common tail: record the chosen key (tgt, mytype) for this group.
    group_types = frozenset( [ objinfo.get_type(x) for x in group if x != tgt ] )
    is_array_flag = is_array(mytype)
    is_primitive_key = is_primitive_array(mytype)
    if is_primitive_key and len(group_types) > 0:
        debug_primitive_key( group = [ x for x in group if x != tgt ],
                             keytype = mytype,
                             keyId = tgt,
                             objinfo = objinfo,
                             logger = logger )
    result = update_keytype_dict( ktdict = ktdict,
                                  objId = tgt,
                                  objType = mytype,
                                  group = group,
                                  objinfo = objinfo,
                                  contextinfo = contextinfo,
                                  group_types = group_types,
                                  filterbytes = filterbytes,
                                  max_age = max_age,
                                  dumpall = dumpall,
                                  writer = writer,
                                  logger = logger )
    # Add to graph
    keyrec = objinfo.get_record(tgt)
    atime = objinfo.get_alloc_time_using_record( keyrec )
    dtime = objinfo.get_death_time_using_record( keyrec )
    # NOTE(review): "ditme" typo again — see the note in the
    # all-primitive branch above.
    dgraph.add_node( gnum, { "size" : len(group),
                             "keytype" : str(mytype),
                             "atime" : atime,
                             "ditme" : dtime, } )
    total_cc += 1
    err_cc = ((err_cc + 1) if not result else err_cc)
    # This looks like all debug.
    # TODO print "-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-"
    # TODO print "%s:" % mytype
    # TODO if is_array(mytype) and len(group_types) > 0:
    # TODO     print " --- DEBUG:"
    # TODO     print "%d [ %s ] - %d" % (tgt, objinfo.get_type(tgt), objinfo.get_death_time(tgt))
    # TODO     for x in group:
    # TODO         tmptype = objinfo.get_type(x)
    # TODO         if x != tgt:
    # TODO             tmp = objinfo.get_record(x)
    # TODO             print "%d [ %s ][ by %s ] - %d" % \
    # TODO                 (x, objinfo.get_type(x), tmp[ get_index("DIEDBY") ], tmp[ get_index("DTIME") ])
    # TODO     else:
    # TODO         for t in group_types:
    # TODO             print t,
    # TODO         print
    # TODO print "-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-"
    # NOTE(review): `result` may be MULTKEY at this point, but the
    # function unconditionally returns ONEKEY below.
    contextresult["total"] = contextresult["total"] + total_cc
    contextresult["error"] = contextresult["error"] + err_cc
    return ONEKEY
def check_host( benchmark = None,
                worklist_config = {},
                host_config = {} ):
    """True when the current machine is assigned to run `benchmark`.

    `worklist_config[benchmark]` lists logical host groups; each group
    maps (via `host_config`) to the concrete hostnames it contains.
    """
    myhost = socket.gethostname()
    return any( myhost in host_config[wanted]
                for wanted in worklist_config[benchmark] )
def get_actual_hostname( hostname = "",
                         host_config = {} ):
    """Reverse-map a concrete hostname to its host-config group key.

    Returns the first group whose hostname list contains `hostname`,
    or None when no group matches.
    """
    for groupkey, hostnames in host_config.iteritems():
        if hostname in hostnames:
            return groupkey
    return None
def __TODO_DELTE_LAST_EDGE():
    # DEAD CODE: stashed body of an old "last edge" key-object heuristic,
    # kept only for reference (note the misspelled DELTE in the name).
    # It references names that are NOT defined in this scope (group,
    # edgeinfo, objinfo, ktdict, gnum, mytype), so calling it raises
    # NameError. Do not call; delete once the logic is reabsorbed.
    lastrec = get_last_edge_record( group, edgeinfo, objinfo )
    # print "%d @ %d : %d -> %s" % ( gnum,
    #                                lastrec["dtime"],
    #                                lastrec["target"],
    #                                str(lastrec["lastsources"]) )
    if len(lastrec["lastsources"]) == 1:
        # Get the type
        used_last_edge = True
        tgt = lastrec["target"]
        print " - last edge successful [%s]" % objinfo.get_type(tgt)
        if objinfo.died_at_end(tgt):
            return DIEDATEND
    elif len(lastrec["lastsources"]) > 1:
        print " - last edge has too many candidates. NO KEY OBJECT FOUND."
        return NOTFOUND
        # No need to do anything becuase this isn't a key object?
        # But DO we need to update the counts of the death groups above TODO
    elif len(lastrec["lastsources"]) == 0:
        # Means stack object?
        stackflag = objinfo.group_died_by_stack(group)
        if not stackflag:
            print " -X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-"
            print " No last edge but group didn't die by stack as a whole:"
            for obj in group:
                rec = objinfo.get_record( obj )
                print " [%d] : %s -> %s (%s)" % ( obj,
                                                  rec[ get_index("TYPE") ],
                                                  rec[ get_index("DIEDBY") ],
                                                  "DAE" if objinfo.died_at_end(obj) else "---" )
            print " -X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-"
        else:
            print " died by stack. Making each object its own key object."
            # Died by stack. Each object is its own key object
            for obj in group:
                if objinfo.died_at_end(obj):
                    print " - ignoring DIED AT END."
                    continue
                tmptype = objinfo.get_type(obj)
                if tmptype in ktdict:
                    ktdict[tmptype]["max"] = max( len(group), ktdict[tmptype]["max"] )
                    ktdict[tmptype]["total"] += 1
                    # NOTE(review): `mytype` is undefined in this scope;
                    # almost certainly should be `tmptype`.
                    ktdict[mytype]["group_types"].update( [ frozenset([])] )
                else:
                    is_array_flag = is_array(tmptype)
                    ktdict[tmptype] = { "total" : 1,
                                        "max" : len(group),
                                        "is_array": is_array_flag,
                                        "group_types" : Counter( [ frozenset([]) ] ) }
    return DIEDBYSTACK
# Expects the dgraph to have the proper nodes in it because
# of the death groups processing. Adds the edges from edgeinfo.
# Then calculates the 5 largest weakly connected components.
# TODO: This could be STRONGLY connected components.
# Writes the main graph, and the SCCs. Then returns the list
# of SCCs.
def build_and_save_graph( dgraph = None,
                          edgeinfo = None,
                          dgroups = None,
                          bmark = None,
                          workdir = None ):
    # Uses the networkx 1.x API throughout (dgraph.node / dgraph.edge).
    # Note: `from __future__ import division` at the top of the file makes
    # the weight divisions below true (float) division.
    # ----------------------------------------
    # Add edges: one weighted edge per object-level reference whose
    # source and target objects fall into death groups.
    max_gsize = 0
    for gsrc in nx.nodes(dgraph):
        # for every object in gsrc
        if "size" not in dgraph.node[gsrc]:
            dgraph.node[gsrc]["size"] = 1
        max_gsize = dgraph.node[gsrc]["size"] if dgraph.node[gsrc]["size"] > max_gsize \
            else max_gsize
        for srcobj in dgroups.get_group(gsrc):
            # for every target object
            for tgtobj in edgeinfo.get_targets(srcobj):
                # get the group that tgtobj belongs in
                tgtgroup = dgroups.get_group_number(tgtobj)
                if tgtgroup != None:
                    # First reference creates the edge; later ones bump
                    # its raw weight.
                    if tgtgroup not in dgraph[gsrc]:
                        dgraph.add_edge( gsrc,
                                         tgtgroup,
                                         { "rawweight" : 1 } )
                    else:
                        dgraph[gsrc][tgtgroup]['rawweight'] += 1
    for gsrc in nx.nodes(dgraph):
        if "size" not in dgraph.node[gsrc]:
            dgraph.node[gsrc]["size"] = 1
        # Node weight: group size as a percentage of the largest group.
        dgraph.node[gsrc]["weight"] = (dgraph.node[gsrc]["size"] / max_gsize) * 100.0
    # ----------------------------------------
    # Get max edge weight
    if dgraph.number_of_edges() > 0:
        weight_max = max( [ dgraph.edge[e[0]][e[1]]['rawweight'] for e in nx.edges(dgraph) ] )
        # Assign the scaled weights as 'weight'
        for e in dgraph.edges():
            dgraph.edge[e[0]][e[1]]["weight"] = (dgraph.edge[e[0]][e[1]]['rawweight'] / weight_max) * 100.0
    # ----------------------------------------
    # Get the top 5 largest weakly connected components
    try:
        wcclist = sorted( nx.weakly_connected_component_subgraphs(dgraph),
                          key = len,
                          reverse = True )[:5]
    except:
        # NOTE(review): bare except; on failure the fallback recomputes
        # the very same expression minus the slice, so it is unclear what
        # error this is meant to absorb — TODO narrow or remove.
        wcclist = sorted( nx.weakly_connected_component_subgraphs(dgraph),
                          key = len,
                          reverse = True )
    # ----------------------------------------
    # Save the graph (and each component) as GML under `workdir`.
    gmlfile = os.path.join( workdir, "%s-DGROUPS-GRAPH.gml" % bmark )
    nx.write_gml(dgraph, gmlfile)
    for gindex in xrange(len(wcclist)):
        gtmp = wcclist[gindex]
        gmlfile = os.path.join( workdir, "%s-DGROUPS-GRAPH-%d.gml" % (bmark, gindex+1) )
        nx.write_gml(gtmp, gmlfile)
    return wcclist
def group_analysis_ONE( graph = None,
                        root = None,
                        dtree = None ):
    """Stub: per-group analysis — not implemented.

    Intended plan (per the original notes): split nodes into two sets,
    those that died before the source group and those that died after.
    TODO: compute root_dtime.
    """
    pass
def analyze_graphs( scclist = [] ):
# TODO TODO
return
slist = scclist
for gind in xrange(len(slist)):
# Get the largest death group
# TODO: Maybe get the top N death groups?
tgt = max( [ x for x in nx.nodes(slist[gind]) ],
key = lambda y: slist[gind].node[y]["size"] )
# Find the reachable nodes
dtree = nx.dfs_tree( slist[gind], tgt )