Compare.py
import argparse
import hashlib
import json
import os
import shutil
import subprocess
import time
from collections import defaultdict

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
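
# Each entry below is a (label, version_indicator, compile_cmd, run_cmd) tuple:
# `label` names the language in reports, `version_indicator` is the suffix expected
# in the PBM file each implementation writes, `compile_cmd` is run once before timing
# (an empty string means no compile step), and `run_cmd` is the command that gets timed.
# To benchmark another language, uncomment its entry (this assumes the matching
# Main.* source file and toolchain are available).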
command_sets = [
    ("C", "c", "gcc Main.c -o results/programc", "./results/programc"),
    ("C2", "c2", "gcc Main2.c -o results/programc2", "./results/programc2"),
    # ("C++", "cpp", "g++ Main.cpp -o results/programcpp", "./results/programcpp"),
    # ("Java", "java", "javac -d results Main.java", "java -cp results Main"),
    # ("Python", "python", "", "python3 Main.py"),
    # ("TypeScript", "typescript", "", "deno run --allow-net --allow-read --allow-write Main.ts"),
    # ("C#", "csharp", "mcs -out:results/programcsharp Main.cs", "mono results/programcsharp"),
    # ("Rust", "rust", "rustc Main.rs -o results/programrust", "./results/programrust"),
    # ("Go", "go", "", "go run Main.go"),
    # ("Haskell", "haskell", "ghc -odir results -hidir results Main.hs -o results/programhaskell", "./results/programhaskell"),
    # ("Haskell**", "ghc -odir results -hidir results MainC.hs -o results/programhaskell_C", "./results/programhaskell_C"),
    # ("Scala", "scala", "scalac -d ./results Main.scala", "scala -cp ./results CellularAutomaton"),
    # ("Clojure", "clojure", "", "clojure Main.clj"),
    # ("Perl", "perl", "", "./Main.pl"),
    # ("PHP", "php", "", "php Main.php"),
    # ("Kotlin", "kotlin", "kotlinc Main.kt -include-runtime -d results/MainKT.jar", "java -jar results/MainKT.jar"),
    # ("JavaScript", "javascript", "", "node Main.js"),
]


# Function to execute a command (no timing)
def execute_command(command):
    if command:  # Only run if the command is not an empty string
        subprocess.run(command, shell=True)  # Using shell=True to interpret the command as a shell command


# Function to execute a command and measure its execution time
def time_command(command):
    start_time = time.time()
    subprocess.run(command, shell=True)
    end_time = time.time()
    return end_time - start_time
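
# Note: time_command measures wall-clock time of the entire child process, so
# interpreter or VM startup time is included in the reported numbers.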


def read_inputs_from_file(file_path):
    with open(file_path, 'r') as file:
        lines = file.readlines()
    rule_number = int(lines[0].strip())
    initial_conditions = lines[1].strip()
    generations = int(lines[2].strip())
    return rule_number, initial_conditions, generations
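
# input.txt is expected to contain three lines: the rule number, the initial
# conditions string, and the number of generations, for example (illustrative
# values only):
#   30
#   00100
#   500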


# Function to get the path to the results file
def get_results_file_path():
    results_directory = "results"
    return os.path.join(results_directory, "run_data.json")


# Function to read existing data from the results file
def read_existing_data(file_path):
    if os.path.exists(file_path):
        with open(file_path, "r") as file:
            return json.load(file)
    return []


# Function to write data to the results file
def write_data_to_file(file_path, data):
    with open(file_path, "w") as file:
        json.dump(data, file, indent=4)
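
# results/run_data.json holds a JSON list of run records; each record stores the
# keys "label", "rule_number", "initial_conditions", "generations" and "run_time"
# (see run_each_command_set below).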


def calculate_average_run_times(runs):
    run_times = defaultdict(list)
    for run in runs:
        run_times[run['label']].append(run['run_time'])
    aggregateData = {}
    for label, times in run_times.items():
        aggregateData[label] = (sum(times) / len(times), len(times))
    return aggregateData


def display_average_run_times(averages):
    print("\nAverage Run Times:")
    print(f"{'Language'.ljust(20, ' ')}{'Average Time (s)'.ljust(20, ' ')}{'Number of Runs'}")
    for label, (avg_time, num_runs) in sorted(averages.items(), key=lambda x: x[1]):
        print(f"{label.ljust(20, ' ')}{f'{avg_time:.4f}'.ljust(20, ' ')}{str(num_runs).ljust(20, ' ')}")


def generate_and_save_graph(data):
    if not data:
        print("No data available to plot.")
        return
    # Prepare data for plotting
    generations = [run['generations'] for run in data]
    run_times = [run['run_time'] for run in data]
    # Plotting
    plt.figure(figsize=(10, 6))
    plt.scatter(generations, run_times, color='blue', label='Run Time')
    plt.title('Elementary Cellular Automaton: Generations vs Run Time')
    plt.xlabel('Generations')
    plt.ylabel('Run Time (seconds)')
    plt.legend()
    plt.grid(True)
    # Save the plot in the results directory
    plt.savefig(os.path.join("results", "generations_vs_runtime.png"))
    print("Graph has been saved.")


# Compile and run each command set, time each run, record it in run_data.json,
# and hash the generated PBM image to check that every implementation produced
# identical output.
def run_each_command_set(existing_runs):
    rule_number, initial_conditions, generations = read_inputs_from_file("input.txt")
    hashes = defaultdict(list)
    for label, version_indicator, compile_cmd, run_cmd in command_sets:
        print(f"Running {label}...")
        execute_command(compile_cmd)
        run_time = time_command(run_cmd)
        filename = f"results/r{rule_number}_g{generations}_i{initial_conditions}_{version_indicator}.pbm"
        if os.path.exists(filename):
            with open(filename, 'rb') as file:
                file_contents = file.read()
            file_hash = hashlib.sha256(file_contents).hexdigest()
            hashes[file_hash].append(label)
        else:
            print(f"File {filename} not found for {label}")
        run = {
            "label": label,
            "rule_number": rule_number,
            "initial_conditions": initial_conditions,
            "generations": generations,
            "run_time": run_time
        }
        existing_runs.append(run)
        write_data_to_file(get_results_file_path(), existing_runs)
    if len(hashes) == 1:
        print(f"\nAll generated images hash to {next(iter(hashes.keys()))}")
    else:
        print(f"\nWarning! The generated images hash to the following {len(hashes)} unique values:\n")
        for file_hash, labels in hashes.items():
            print(f"{file_hash}")
            for label in labels:
                print(f"\t{label}")


def generate_and_save_bar_graph(existing_runs, sort):
    if not existing_runs:
        print("No data available to plot.")
        return
    # Check if all runs have the same rule, generation count, and initial conditions
    unique_configs = set((run['rule_number'], run['initial_conditions'], run['generations']) for run in existing_runs)
    uniform_config = len(unique_configs) == 1
    # Aggregate runs based on unique configuration
    aggregated_runs = {}
    label_colors = {}  # Store colors for each label
    colormap = matplotlib.colormaps['hsv']
    for run in existing_runs:
        label = run['label']
        config_key = (label, run['rule_number'], run['initial_conditions'], run['generations'])
        if config_key not in aggregated_runs:
            aggregated_runs[config_key] = []
        if label not in label_colors:
            # Hash the label so each language keeps a stable colormap position across runs
            label_hash = int(hashlib.sha256(label.encode('utf-8')).hexdigest(), 16) % (10**8)
            label_colors[label] = colormap(label_hash / (10**8))
        aggregated_runs[config_key].append(run['run_time'])
    # Calculate average run time for each configuration
    config_avg_times = []
    for config, times in aggregated_runs.items():
        avg_time = sum(times) / len(times)
        if uniform_config:
            label = config[0]  # Use only the label if configurations are uniform
        else:
            label = f"{config[0]} (Rule {config[1]}, Gen {config[3]}, IC {config[2]})"
        config_avg_times.append((label, avg_time, label_colors[config[0]]))
    if sort:
        config_avg_times.sort(key=lambda x: x[1])
    config_labels, avg_run_times, colors = zip(*config_avg_times)
    plt.figure(figsize=(12, 8))
    y_pos = np.arange(len(config_labels))
    plt.bar(y_pos, avg_run_times, align='center', alpha=0.7, color=colors)
    plt.xticks(y_pos, config_labels, rotation='vertical')
    plt.ylabel('Average Run Time (seconds)')
    if uniform_config:
        rule, ic, generations = unique_configs.pop()
        plt.title(f'Average Run Times (Rule {rule}, {generations} generations, Initial Conditions="{ic}")')
    else:
        plt.title('Average Run Times by Configuration')
    plt.tight_layout()
    if not os.path.exists("results"):
        os.makedirs("results")
    plt.savefig(os.path.join("results", "configurations_vs_runtime.png"))
    print("Bar graph has been saved.")


def main():
    parser = argparse.ArgumentParser(description='Compile, run, and time each cellular automaton implementation, then summarize or plot the results.')
    parser.add_argument('--graph', '-g', action='store_true', help='Generate and save a scatter graph of generations vs run time')
    parser.add_argument('--average', '-avg', action='store_true', help='Calculate and display average run times')
    parser.add_argument('--bar', '-b', action='store_true', help='Generate and save a bar graph of average run times')
    parser.add_argument('--sort', action='store_true', help='Sort the bar graph by average run time')
    parser.add_argument('--runs', type=int, help='Specify the number of runs')
    parser.add_argument('--clear', action='store_true', help='Clear the results directory')
    args = parser.parse_args()
    # List to store all runs
    existing_runs = read_existing_data(get_results_file_path())
    if args.graph:
        generate_and_save_graph(existing_runs)
        return
    if args.average:
        aggregateData = calculate_average_run_times(existing_runs)
        display_average_run_times(aggregateData)
        return
    if args.bar:
        generate_and_save_bar_graph(existing_runs, args.sort)
        return
    if args.clear:
        if os.path.exists("results"):
            shutil.rmtree("results")
        os.makedirs("results")
        return
    for _ in range(args.runs if args.runs is not None else 1):
        run_each_command_set(existing_runs)


if __name__ == "__main__":
    main()
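
# Example invocations (assuming Python 3 and the toolchains for the enabled languages):
#   python3 Compare.py --runs 5      # run every enabled command set five times
#   python3 Compare.py --average     # print the average run time per language
#   python3 Compare.py --bar --sort  # save a sorted bar graph of average run times
#   python3 Compare.py --graph       # save a generations-vs-run-time scatter plot
#   python3 Compare.py --clear       # wipe and recreate the results directory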