-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrunsynp.py
More file actions
266 lines (227 loc) · 9.15 KB
/
runsynp.py
File metadata and controls
266 lines (227 loc) · 9.15 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
import os
import importlib
import sys
import math
sys.path.append('../../')
sys.path.append('../')
from subprocess import STDOUT, PIPE, run
from distutils.util import strtobool
#import procedures
#from methods.erm import erm_procedure as erm
#from methods.cegqs import cegqs_procedure as cegqs
# import synthesizers
#from synthesizer.max_sharp_sat import mmc_synthesizer
from synthesizer.max_sat import ms_synthesizer
#from synthesizer.omt import omt_synthesizer
#from synthesizer.decision_trees import cart_tree_synthesizer
#from evaluator.recall_evaluator import evaluator
from evaluator.cochran_evaluator import evaluator as cochran
import timeit
from utils import logger
def writeConfig(dd_size, benchmark_path, features, fcount):
    """Write the config.mmc file that drives one synthesis run.

    Args:
        dd_size: decision-diagram size bound to record in the config.
        benchmark_path: directory in which config.mmc is created.
        features: feature names, in order.
        fcount: per-feature partition (threshold) counts, aligned with features.
    """
    # A feature with k > 0 thresholds yields k+1 buckets; k == 0 is emitted as-is.
    entries = [f"{name}:{cnt + 1 if cnt > 0 else cnt}"
               for name, cnt in zip(features, fcount)]
    with open(f"{benchmark_path}/config.mmc", 'w') as cf:
        cf.write(f"size = {dd_size}\n")
        cf.write("features = " + ", ".join(entries) + "\n")
        cf.write("labels = Class:4\n")
        cf.write("extend = false \n")
        cf.write("feature_defs = feature_defs\n")
def writeFeatureDefs(benchmark_path, features, fcount, labels, user_recommendation):
    """Generate feature_defs.py: a bucketing function per feature plus label accessors.

    Args:
        benchmark_path: directory in which feature_defs.py is created.
        features: feature names; one generated function per feature.
        fcount: number of chosen threshold values per feature (aligned with features).
        labels: output label names; one generated accessor per label.
        user_recommendation: per-feature lists of threshold values.
    """
    sz = len(features)
    with open(f"{benchmark_path}/feature_defs.py", 'w') as fd:
        fd.write("#Generated file \n")
        fd.write("def check_num_of_inputs(inputs):\n")
        fd.write(f"\treturn len(inputs)=={sz}\n\n")
        # One function per feature: compare the raw value against the
        # thresholds in descending order and return the bucket index.
        for cindex, name in enumerate(features):
            fd.write(f"def {name}(inputs):\n")
            fd.write("\tassert(check_num_of_inputs(inputs))\n")
            fd.write(f"\tvalue = inputs[{cindex}][1]\n")
            # sorted() copy instead of in-place sort: don't mutate the
            # caller's threshold lists as a side effect.
            values = sorted(user_recommendation[cindex], reverse=True)
            for findex, val in enumerate(values):
                fd.write(f"\tif value > {val}:\n")
                fd.write(f"\t\treturn {findex}\n")
            fd.write(f"\treturn {len(values)}\n\n")
        # Label accessors. BUG FIX: the original never incremented the label
        # index, so every label read outputs[0][1]; enumerate makes each
        # label read its own output slot (single-label behavior unchanged).
        for index, label in enumerate(labels):
            fd.write(f"def {label}(outputs):\n")
            fd.write(f"\tvalue = outputs[{index}][1]\n")
            fd.write("\treturn value\n\n")
        fd.write("def retrieve_feature_defs(): \n")
        fd.write("\tfeature_defs = {} \n")
        # Only features that ended up with at least one threshold are registered.
        for name, cnt in zip(features, fcount):
            if cnt > 0:
                fd.write(f"\tfeature_defs[\"{name}\"] = {name}\n")
        for label in labels:
            fd.write(f"\tfeature_defs[\"{label}\"] = {label}\n")
        fd.write("\treturn feature_defs \n")
def query_yes_no(question, default='no'):
    """Ask a yes/no question on stdin and return the answer as a boolean.

    Args:
        question: prompt text shown to the user.
        default: 'yes', 'no', or None (None means the user must answer).

    Returns:
        True/False for the user's answer; the default when they just hit enter.

    Raises:
        ValueError: if `default` is not 'yes', 'no', or None.
    """
    if default is None:
        prompt = " [y/n] "
    elif default == 'yes':
        prompt = " [Y/n] "
    elif default == 'no':
        prompt = " [y/N] "
    else:
        raise ValueError(f"Unknown setting '{default}' for default.")
    # Truth table formerly provided by distutils.util.strtobool, which was
    # removed from the standard library in Python 3.12 (PEP 632).
    truthy = {'y', 'yes', 't', 'true', 'on', '1'}
    falsy = {'n', 'no', 'f', 'false', 'off', '0'}
    while True:
        resp = input(question + prompt).strip().lower()
        if default is not None and resp == '':
            return default == 'yes'
        if resp in truthy:
            return True
        if resp in falsy:
            return False
        print("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def genTrainingData(class_size, delta, epsilon, synthesis_benchmark_path):
    """Draw uniform training samples from the benchmark's sampler module.

    The sample count follows the ERM-style bound
    ceil(log((1 + class_size) / delta) / epsilon).

    Args:
        class_size: hypothesis-class size driving the sample bound.
        delta: confidence parameter.
        epsilon: accuracy parameter.
        synthesis_benchmark_path: benchmark directory; must contain sampler.py.

    Returns:
        dict of samples produced by the benchmark's uniform sampler.
    """
    num_of_samples = math.ceil(math.log((1 + class_size) / delta) / epsilon)
    print(f"Using {num_of_samples} samples ...")
    # Turn e.g. "experiments/ICML/foo/" into the package "experiments.ICML.foo"
    # so ".sampler" can be imported relative to it.
    package = synthesis_benchmark_path.replace("/", ".").rstrip('.')
    sampler = importlib.import_module(".sampler", package)
    samples = {}
    samples.update(sampler.uniform(num_of_samples))
    logger.dump_samples(samples, synthesis_benchmark_path, "erm_syn_samples")
    return samples
def genTestData(delta, epsilon, benchmark_path):
    """Draw uniform test samples sized by a Cochran-style sample bound.

    Args:
        delta: confidence parameter; only 0.05 and 0.01 are supported.
        epsilon: half-width of the desired confidence interval.
        benchmark_path: benchmark directory; must contain sampler.py.

    Returns:
        dict of samples produced by the benchmark's uniform sampler.

    Raises:
        ValueError: if no z-value is known for `delta`.
    """
    # Normal-distribution z-values for the supported confidence levels.
    z_values = {0.05: 1.96, 0.01: 2.33}
    if delta not in z_values:
        raise ValueError(f"z-value for delta {delta} unknown")
    z_value = z_values[delta]
    sampler = importlib.import_module(".sampler", benchmark_path.replace("/", ".").rstrip('.'))
    # Cochran's formula with worst-case variance p(1-p) = 0.25.
    num_of_samples = math.ceil((z_value ** 2) * 0.25 / (epsilon ** 2))
    print(f"|---Using {num_of_samples} samples for test data")
    eva_samples = {}
    # Retrieve samples
    eva_samples.update(sampler.uniform(num_of_samples))
    logger.dump_samples(eva_samples, benchmark_path, "dt_samples")
    return eva_samples
def experiment(dd_size, delta, epsilon, dir_name,depth, class_size, features, labels):
    """Run one end-to-end synthesis experiment on the California census benchmark.

    Pipeline: generate training/test data, run the external InferDT tool to get
    recommended feature thresholds, let the user confirm/override them
    interactively, generate feature_defs.py and config.mmc, then synthesize
    programs of decreasing size with the module-level `synthesizer`.

    NOTE(review): `dir_name`, `depth`, and `dd_size`'s relation to `depth` are
    only used in the commented-out reporting code; the benchmark path is
    hard-coded and does not use `dir_name`.
    """
    # Benchmark path is hard-coded; dir_name is currently unused.
    synthesis_benchmark_path = f"experiments/ICML/california_census/"
    print (synthesis_benchmark_path)
    test = synthesis_benchmark_path.replace("/",".").rstrip('.')
    print (test)
    erm_synthesis_time = 0
    # Generate training and test data
    print ("Generating Training Data ... " )
    samples = genTrainingData(class_size, delta, epsilon, synthesis_benchmark_path)
    print ("Generating Test Data ... " )
    test_data = genTestData(delta, epsilon, synthesis_benchmark_path )
    # CSV paths where the samplers dumped the generated data.
    samples_file = f"{synthesis_benchmark_path}samples/erm_syn_samples.csv"
    test_file = f"{synthesis_benchmark_path}samples/dt_samples.csv"
    #Run a opt decision tree tool
    print ("Running Infer DT..............Please wait")
    result = run (f"./synthesizer/InferDT/InferDT {samples_file} -v -x infer -t {test_file}", stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=True, timeout=300) #Timeout for 5 minutes
    #result = run (f"./synthesizer/InferDT/InferDT {samples_file} -v -x infer", stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=True)
    print ("Result of Infer DT")
    res = result.stdout
    print (res)
    # printing result
    # Scan InferDT's tokenized output for split conditions that mention each
    # feature (tokens like "feature>value:", skipping negated "!..." tokens).
    strres = res.split()
    user_recommendation=[]
    fcount = []
    recommend = False;
    for f in features:
        count = 0
        for sentence in strres:
            if sentence.count(f) > 0:
                if not sentence.startswith("!"):
                    sent = sentence.replace(":", "")
                    user_recommendation.append(sent)
                    count +=1
        # fcount[i] = how many recommended thresholds feature i received.
        fcount.append (count)
        if count > 0: #The output contains the tree
            recommend = True;
    #Take user choices and recommendations
    print ("Recommended Features and Partitions Using InferDT")
    print (user_recommendation)
    print (fcount)
    findex = 0
    rindex = 0
    user_choice = []
    user_count = []
    # Walk user_recommendation in fcount-sized slices: for each feature,
    # extract the numeric part after ">" and let the user accept or override.
    for i in fcount:
        reco = user_recommendation[rindex:rindex+i]
        values = []
        for e in reco:
            tt = e.split(">")
            values.append(tt[1])
        values.sort(reverse=True)
        print (f"For feature {features[findex]}, recommended values are" )
        print(values)
        x = input ("Please give the values of your interest. If you want to use the recommended values, press enter.): ").split()
        if (len(x) == 0):
            # Empty answer: keep the recommended thresholds.
            user_choice.append(values)
            user_count.append (len(values))
        else:
            user_choice.append(x)
            user_count.append(len(x))
        findex +=1
        rindex +=i
    print ("Your choices are " )
    print (user_choice)
    print (user_count)
    #Generate feature_defs.py
    writeFeatureDefs(synthesis_benchmark_path, features, user_count, labels, user_choice)
    # Try progressively smaller decision-diagram sizes (dd_size down to 2).
    for l in range (dd_size, 1, -1):
        #create config.mmc
        # NOTE(review): config uses fcount (InferDT counts) while feature_defs
        # uses user_count (user choices) — confirm this mismatch is intended.
        writeConfig(l, synthesis_benchmark_path, features, fcount)
        #--------------Calling the synthesizer ------#
        synthesis_time = 0
        # `synthesizer` is a module-level global bound near the bottom of the file.
        synthesizer.initialize(synthesis_benchmark_path)
        start = timeit.default_timer()
        print (f"Synthesizing program of size {l}")
        erm_program_path, dot_path, count = synthesizer.synthesize(synthesis_benchmark_path,samples,f"ms_erm")
        if erm_program_path != "" :
            print(f"Synthesized program: {erm_program_path}\nVisualization: {dot_path}")
        stop = timeit.default_timer()
        synthesis_time = stop-start
        num_of_samples = len(samples)
        print(f"Synthesis time:{synthesis_time}")
        if count>0:
            # count = number of correctly classified training samples.
            print(f"Misclassification rate:{1-count/num_of_samples}")
        print("----------------------------")
    # NOTE(review): indentation was lost in extraction; this evaluation banner is
    # placed after the size loop — confirm against the original layout.
    print("Evaluating program on test dataset...")
    #refinement, misclass_rate = cochran.eval(test_data,synthesis_benchmark_path,erm_program_path, samples, delta, epsilon, 100)
    #print("****************************************************")
    #print(f"ERM misclass rate: {misclass_rate}. Time: {erm_synthesis_time}. Size: {depth}")
    #print("****************************************************")
# ---- Script entry: experiment configuration --------------------------------
# NOTE(review): these statements run at import time; consider guarding with
# `if __name__ == "__main__":` so the module can be imported without running.
dd_size = 5  # starting decision-diagram size bound (loop counts down to 2)
delta = 0.05  # confidence parameter; genTestData only supports 0.05 / 0.01
epsilon = 0.05  # accuracy parameter for the sample-size bounds
benchmark_name = "la"
class_size=100  # hypothesis-class size used by the ERM training bound
synthesizer = ms_synthesizer  # read as a module-level global inside experiment()
dir_name = f"{benchmark_name}_{dd_size}_{delta}_{epsilon}"
features= ["population","median_income",'latitude',"longitude"]
labels =["Class"]
experiment(dd_size,delta,epsilon,dir_name,dd_size, class_size, features, labels)