import re, subprocess, argparse
from git import Repo
from statistics import mean

# To run this script you must set up your benchmarking subcases as git branches.
# Naming is important:
# You must be on branch 'your_branch_name'; the script does NOT run your scene on this branch.
# Your benchmark branches must be named 'your_branch_name-test1', 'your_branch_name-test2', etc.
# The script checks out each of these branches, runs your scene and accumulates the results.
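# Example branch layout (names are illustrative):
#   my-feature          <- branch you are on; the scene is not benchmarked here
#   my-feature-test1    <- benchmark case 1
#   my-feature-test2    <- benchmark case 2
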
# Edit the defaults to match your system
parser = argparse.ArgumentParser(description="Run benchmarks for a scene")
parser.add_argument("-sofaExe", type=str, default="runSofa", help="Path to the runSofa executable on your system")
parser.add_argument("-scene", type=str, required=True, help="Path to the scene file you wish to run")
parser.add_argument("-iterations", type=int, default=100, help="Number of simulation iterations (steps) to perform")
parser.add_argument("-tests", type=int, default=3, help="Number of tests to run for each benchmark branch")
args = parser.parse_args()

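# Example invocation (script name and paths are illustrative):
#   python run_benchmarks.py -sofaExe ~/sofa/build/bin/runSofa -scene examples/Demos/caduceus.scn -iterations 200 -tests 5
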
# Get arguments
runSofa = args.sofaExe
# Scene name
xml_name = args.scene
# Runtime setup
n_iterations = args.iterations
# Number of tests to run for each case
n_tests = args.tests

# Dictionary to store the results of each benchmark case, keyed by its tag
benchmarks = {}

# Get git info to find branches
repo = Repo(search_parent_directories=True)
branch_prefix = repo.active_branch.name
benchmark_branches = [
    branch for branch in repo.branches if branch.name.startswith(branch_prefix + '-')]

print(f'Running {xml_name} spawned from {branch_prefix} with {n_iterations} iterations')

output_filename = 'log.performance.csv'
with open(output_filename, mode='w', newline='') as csv_file:
    csv_file.write(branch_prefix + ', time [s], fps\n')

    for branch in benchmark_branches:
        repo.git.checkout(branch.name)
        git_tag = branch.name[len(branch_prefix + '-'):]
        # Use a fresh result container for each branch; reusing a single shared dict
        # would accumulate times and FPS values across branches
        benchmarks[git_tag] = {'time': [], 'fps': [], 'iterations': n_iterations, 'git-branch': branch.name}

        for i in range(n_tests):
            print(f'Git tag: {git_tag} - test {i+1}/{n_tests}')

            # Measure performance: run the scene with the non-graphical batch GUI ("-g batch")
            # for n_iterations simulation steps ("-n") and capture the console output
            output = subprocess.run([runSofa, "-g", "batch", "-n", str(n_iterations), xml_name], shell=False, capture_output=True, text=True)
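            # The batch run is expected to print a summary line containing "iterations done in";
            # the last two floating-point numbers on that line are taken as the elapsed
            # time in seconds and the average FPS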
            for line in output.stdout.splitlines():
                if "iterations done in" in line:
                    numbers = re.findall(r"\d+\.\d+", line)
                    time_taken, fps = float(numbers[-2]), float(numbers[-1])
                    benchmarks[git_tag]['time'].append(time_taken)
                    benchmarks[git_tag]['fps'].append(fps)
                    break

            ## Alternative, to troubleshoot when SOFA crashes before printing the summary line:
            ## stream the output live instead of capturing it
            #output = subprocess.Popen([runSofa, "-g", "batch", "-n", str(n_iterations), xml_name], shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
            #for line in output.stdout:
            #    print(line, end="")
            #    if "iterations done in" in line:
            #        numbers = re.findall(r"\d+\.\d+", line)
            #        time_taken, fps = float(numbers[-2]), float(numbers[-1])
            #        benchmarks[git_tag]['time'].append(time_taken)
            #        benchmarks[git_tag]['fps'].append(fps)
            #        break
            #output.wait()

        # Average over the n_tests runs and write one CSV row per benchmark tag
        mean_time = mean(benchmarks[git_tag]['time'])
        mean_fps = mean(benchmarks[git_tag]['fps'])
        csv_file.write(f'{git_tag}, {mean_time}, {mean_fps}\n')

# Restore the branch the script was started from
repo.git.checkout(branch_prefix)
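
# The resulting log.performance.csv contains one row of mean values per benchmark tag,
# e.g. (numbers are illustrative):
#   my-feature, time [s], fps
#   test1, 12.34, 81.03
#   test2, 11.87, 84.25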