diff --git a/app/site/_includes/graph-toggle.liquid b/app/site/_includes/graph-toggle.liquid
index 2ca5907911..bc693f6ed9 100644
--- a/app/site/_includes/graph-toggle.liquid
+++ b/app/site/_includes/graph-toggle.liquid
@@ -34,7 +34,7 @@
   {% endif %}
   {% assign graphPath = graph | strip %}
   {% assign distPath = baseurl | append: "/assets/img/graphs" | append: graphPath %}
-  {% assign fileExtension = path | split: '.' | last %}
+  {% assign fileExtension = graphPath | split: '.' | last %}
   {% if fileExtension == 'svg' %}
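A note on this fix: `path` appears not to be assigned anywhere in this include, so the extension check could never match; `graphPath` is the value built just above it. A Python analogue of the corrected filter chain, with an illustrative path:

# Python equivalent of `graphPath | split: '.' | last` (path is illustrative)
"/org/repo/libyear_timeline.svg".split('.')[-1]  # -> 'svg'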
diff --git a/app/site/_layouts/repo-report.liquid b/app/site/_layouts/repo-report.liquid
index 3fd984cd24..8470dfbffb 100644
--- a/app/site/_layouts/repo-report.liquid
+++ b/app/site/_layouts/repo-report.liquid
@@ -1,7 +1,22 @@
 ---
 layout: base
 ---
+
+
 {% assign project = projects | findObject: repo %}
@@ -155,6 +170,40 @@ layout: base
 
         {{ project.pull_requests_count }}
 
+
+        {% lucide "calendar" %}
+        Project Creation Date
+
+        {{ project.created_at | date: '%B %d, %Y' }}
+
 {% else %}
 
     Error Occurred: Object Not Found
 
diff --git a/scripts/gen_graphs.py b/scripts/gen_graphs.py
index 0709baf86b..bcd22a2034 100644
--- a/scripts/gen_graphs.py
+++ b/scripts/gen_graphs.py
@@ -2,8 +2,46 @@
 Module to define methods to create pygals graphs
 """
 import datetime
+from datetime import timedelta
+import re
 import pygal
+
+def percent_formatter(x):
+    """
+    Function to format percentage values.
+
+    Arguments:
+        x: Value to format into a percent
+    Returns:
+        A string containing the formatted version of x
+    """
+
+    return '{:0.2f}%'.format(x)
+
+def timedelta_formatter(x):
+    """
+    Function to format timedelta values as a number of days.
+
+    Arguments:
+        x: timedelta value to format into days
+    Returns:
+        A string containing the formatted version of x
+    """
+
+    return '{} days'.format(x.days)
+
+def ignore_formatter(x):
+    """
+    Function to ignore values in formatting.
+
+    Arguments:
+        x: Value to ignore
+    Returns:
+        An empty string
+    """
+
+    return ''
+
 def generate_all_graphs_for_repos(all_repos):
     """
     Function to generate and save all graphs for the input
@@ -16,14 +54,36 @@
         print(f"Generating graphs for repo {repo.name}")
         generate_solid_gauge_issue_graph(repo)
         generate_repo_sparklines(repo)
+        generate_predominant_languages_graph(repo)
+        generate_language_summary_pie_chart(repo)
+        generate_cost_estimates_bar_chart(repo)
+        generate_time_estimates_bar_chart(repo)
+        generate_average_issue_resolution_graph(repo)
 
         try:
             generate_donut_graph_line_complexity_graph(repo)
-            generate_time_xy_issue_graph(repo, "new_commit_contributors_by_day_over_last_month", "New Contributors")
-            generate_time_xy_issue_graph(repo, "new_commit_contributors_by_day_over_last_six_months", "New Contributors")
+            generate_time_xy_issue_graph(
+                repo, "new_commit_contributors_by_day_over_last_month", "New Contributors"
+            )
+            generate_time_xy_issue_graph(
+                repo, "new_commit_contributors_by_day_over_last_six_months", "New Contributors"
+            )
         except KeyError as e:
             print(f"Could not find metrics to build graphs for repo {repo.name}")
             print(e)
 
+        try:
+            generate_libyears_graph(repo)
+        except KeyError:
+            print(f"Repository {repo.name} has no deps data associated with it!")
+
+        try:
+            generate_dryness_percentage_graph(repo)
+        except ValueError as e:
+            print("Could not parse DRYness due to percentage values being invalid!")
+            print(e)
+        except KeyError as e:
+            print(f"Could not find metrics to build dryness graphs for repo {repo.name}")
+            print(e)
 
 def generate_all_graphs_for_orgs(all_orgs):
     """
@@ -39,10 +99,14 @@
         generate_time_xy_issue_graph(org, "new_issues_by_day_over_last_month", "New Issues")
         generate_top_committer_bar_graph(org)
 
+        try:
+            generate_libyears_graph(org)
+        except KeyError:
+            print(f"Org {org.name} has no deps data associated with it!")
 
 def write_repo_chart_to_file(repo, chart, chart_name, custom_func=None, custom_func_params={}):
     """
-    This function's purpose is to save a pygals chart to a path derived from the 
+    This function's purpose is to save a pygals chart to a path derived from the
     repository object passed in.
 
     Arguments:
@@ -63,7 +127,6 @@
             f"Repo {repo.name} has a division by zero error when trying to make graph")
 
     # issues_gauge.render_to_file(repo.get_path_to_graph_data("issue_gauge"))
-
 def generate_repo_sparklines(repo):
     """
    This function generates pygals sparklines graphs for a set of Repository objects.
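For reference, the three module-level formatters added above behave as follows (inputs here are illustrative):

from datetime import timedelta

percent_formatter(12.3456)                 # -> '12.35%'
timedelta_formatter(timedelta(days=400))   # -> '400 days'
ignore_formatter(42)                       # -> ''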
@@ -114,14 +177,13 @@ def generate_time_xy_issue_graph(oss_entity,data_key,legend_key):
     write_repo_chart_to_file(oss_entity, xy_time_issue_chart, data_key)
 
-
 def generate_donut_graph_line_complexity_graph(oss_entity):
     """
     This function generates pygals line complexity donut graph for a set of
     Repository objects.
 
     Arguments:
-        oss_entity: The OSSEntity to create a graph for. an 
+        oss_entity: The OSSEntity to create a graph for. an
         OSSEntity is a data structure that is typically a repository or
         an organization.
     """
@@ -142,7 +204,6 @@
     write_repo_chart_to_file(oss_entity, donut_lines_graph, "total_line_makeup")
 
-
 def generate_solid_gauge_issue_graph(oss_entity):
     """
     This function generates pygals solid gauge issue/pr graphs for a set of Repository objects.
@@ -153,8 +214,6 @@
     issues_gauge = pygal.SolidGauge(inner_radius=0.70, legend_at_bottom=True)
 
-    def percent_formatter(x):
-        return '{:0.2f}%'.format(x)
     issues_gauge.value_formatter = percent_formatter
 
     # Generate graph to measure percentage of issues that are open
@@ -164,6 +223,10 @@ def percent_formatter(x):
             oss_entity.metric_data['issues_count']
     except ZeroDivisionError:
         open_issue_percent = 0
+    except TypeError:
+        print("Repo has no issues")
+        return
+
     issues_gauge.add(
         'Open Issues', [{'value': open_issue_percent * 100, 'max_value': 100}])
@@ -200,7 +263,7 @@ def generate_top_committer_bar_graph(oss_entity):
     Arguments:
         oss_entity: the OSSEntity to create a graph for.
     """
-    
+
     # Create a bar chart object
     bar_chart = pygal.Bar()
     bar_chart.title = f"Top Committers in {oss_entity.metric_data['name']}"
@@ -216,4 +279,327 @@
         bar_chart.add(committer, commits)
         contributor_count += 1
 
-    write_repo_chart_to_file(oss_entity, bar_chart, "top_committers")
\ No newline at end of file
+    write_repo_chart_to_file(oss_entity, bar_chart, "top_committers")
+
+def generate_predominant_languages_graph(oss_entity):
+    """
+    This function generates a pygal predominant programming languages bar graph.
+
+    Arguments:
+        oss_entity: the OSSEntity to create a graph for.
+    """
+
+    bar_chart = pygal.Bar()
+    bar_chart.title = f"Predominant Languages in {oss_entity.metric_data['name']}"
+
+    predominant_lang = oss_entity.metric_data['predominant_langs']
+
+    for lang, lines in predominant_lang.items():
+        bar_chart.add(lang, lines)
+
+    write_repo_chart_to_file(oss_entity, bar_chart, "predominant_langs")
+
+def parse_libyear_list(dependency_list):
+    """
+    Parses the dependency list returned from the libyear metric into a list of python dictionaries
+    that have correctly parsed dates.
+
+    Arguments:
+        dependency_list: the list of lists that has the deps data
+
+    Returns:
+        A list of dictionaries describing deps
+    """
+
+    to_return = []
+    for dep in dependency_list:
+        date = datetime.datetime.strptime(dep[-1], '%Y-%m-%dT%H:%M:%S.%f')
+        to_return.append(
+            {
+                "dep_name": dep[-3],
+                "libyear_value": dep[-2],
+                "libyear_date_last_updated": date
+            }
+        )
+
+    #return list sorted by libyear value
+    return sorted(to_return, key=lambda d: d["libyear_value"])
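A sketch of the shape parse_libyear_list consumes and produces, using made-up rows whose tail follows the (name, libyear, most_recent_collection) order the Augur libyear endpoints return:

rows = [
    ["example-repo", "requests", 1.4, "2023-01-02T03:04:05.000000"],
    ["example-repo", "pygal", 0.2, "2023-06-07T08:09:10.000000"],
]
parse_libyear_list(rows)
# -> [{'dep_name': 'pygal', 'libyear_value': 0.2, ...},
#     {'dep_name': 'requests', 'libyear_value': 1.4, ...}]
# Sorted ascending by libyear_value, not by date.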
+ """ + + try: + raw_dep_list = oss_entity.metric_data['repo_dependency_libyear_list'] + except KeyError: + raw_dep_list = oss_entity.metric_data['dependency_libyear_list'] + + if not raw_dep_list: + return + + #This is going to be kind of hacky since pygals doesn't have a + #timeline object + #TODO: Contribute upstream to add a timeline object to pygal + dateline = pygal.TimeDeltaLine(x_label_rotation=25,legend_at_bottom=True) + dateline.x_value_formatter = timedelta_formatter + dateline.value_formatter = ignore_formatter + + + dep_list = parse_libyear_list(raw_dep_list) + total_libyears_ood = sum(n['libyear_value'] for n in dep_list) + + dateline.title = f"""Dependency Libyears: Age of Dependency Version + Total Libyears: {round(total_libyears_ood,1)}""" + + #We are going to treat the y-axis as having one dep per level in the graph + elevation = 0 + for dep in dep_list: + dateline.add(dep["dep_name"], [ + (timedelta(), elevation), + (timedelta(days=dep["libyear_value"] * 365), elevation), + ]) + + #move one line up so that we have no overlap in the timedeltas + elevation += 1 + + if elevation >= 40: + break + + dateline.show_y_labels = False + write_repo_chart_to_file(oss_entity, dateline, "libyear_timeline") + +def parse_cocomo_dryness_metrics(dryness_string): + """ + This function parses the output of the scc dryness metrics. + + For some reason, ULOC, SLOC, and DRYness don't show up in the json and + only show up in the stdout text. + + Arguments: + dryness_string: the string containing the dryness table to parse + + Returns: + A dictionary with the unique lines of code and DRYness percentage + """ + + dryness_metrics = {} + + #Parse output line by line + for line in dryness_string.split('\n'): + #Parse the parts that we want into fields + if 'Unique Lines of Code' in line: + #Use regex to remove all non-numerals from the string + dryness_metrics['total_uloc'] = re.sub('[^0-9.]','',line) + if 'DRYness' in line: + #Use regex to remove all non-numerals from the string + dryness_metrics['DRYness_percentage'] = re.sub('[^0-9.]','',line) + + return dryness_metrics + +def generate_dryness_percentage_graph(oss_entity): + """ + This function generates a pygal DRYness pie graph. + + DRYness = ULOC / SLOC + + WETness = 1 - DRYness + + DRY = Don't repeat yourself + WET = Waste Everybody's time or Write Everything Twice + """ + + dryness_values = parse_cocomo_dryness_metrics( + oss_entity.metric_data["cocomo"]['dryness_table'] + ) + + sloc = (float(dryness_values['total_uloc']) / float(dryness_values['DRYness_percentage'])) + sloc_diff = sloc - float(dryness_values['total_uloc']) + sloc_percent = (sloc_diff / sloc) * 100 + + uloc_percent = (float(dryness_values['total_uloc']) / sloc) * 100 + + pie_chart = pygal.Pie(half_pie=True, legend_at_bottom=True) + pie_chart.value_formatter = percent_formatter + pie_chart.title = 'DRYness Percentage Graph' + + #print(dryness_values) + + pie_chart.add( + 'Unique Lines of Code (ULOC) %', uloc_percent + ) + + #Will cause a value error if the dryness value is NaN which can happen. + pie_chart.add( + 'Source Lines of Code (SLOC) %', + #sloc = uloc / DRYness + sloc_percent + ) + + write_repo_chart_to_file(oss_entity, pie_chart, "DRYness") + + +def generate_language_summary_pie_chart(oss_entity): + """ + This function generates a pygal pie chart for programming languages + and total lines written in each language. + + The total LoC is displayed in the chart's title. + + Arguments: + oss_entity: the OSSEntity to create a graph for. 
+ """ + + pie_chart = pygal.Pie() + + language_summary = oss_entity.metric_data.get('cocomo', {}).get('languageSummary') + if not language_summary: + print("No valid 'languageSummary' found in the data.") + return + + total_loc = sum(entry.get('Code', 0) for entry in language_summary) + + pie_chart.title = f'Language Summary \n Total Source Lines of Code (SLOC): {total_loc:,}' + + pie_chart.value_formatter = lambda x: f'{x} SLOC' + + for entry in language_summary: + code_lines = entry.get('Code', 0) + pie_chart.add(entry['Name'], code_lines) + + write_repo_chart_to_file(oss_entity, pie_chart, "language_summary") + + +def generate_cost_estimates_bar_chart(oss_entity): + """ + This function generates a pygal bar chart for estimated costs + with rounded values and a dollar sign. + + Arguments: + oss_entity: the OSSEntity to create a graph for. + """ + + bar_chart = pygal.Bar(legend_at_bottom=True) + + if oss_entity.metric_data is not None: + metric_data = oss_entity.metric_data.get('cocomo', {}) + estimated_cost_low = float(metric_data.get('estimatedCost_low', 0) or 0.0) + estimated_cost_high = float(metric_data.get('estimatedCost_high', 0) or 0.0) + else: + estimated_cost_low = 0.0 + estimated_cost_high = 0.0 + + bar_chart.value_formatter = lambda x: f'${x:,.2f}' + + average_cost = (estimated_cost_low + + estimated_cost_high) / 2 + + bar_chart.title = f'Estimated Project Costs in $ From Constructive Cost Model (COCOMO) \n Average Cost: ${average_cost:,.2f}' + + bar_chart.add(f'Estimated Cost Low (${estimated_cost_low:,.2f})', + estimated_cost_low) + bar_chart.add(f'Estimated Cost High (${estimated_cost_high:,.2f})', + estimated_cost_high) + + write_repo_chart_to_file(oss_entity, bar_chart, "estimated_project_costs") + + +def generate_time_estimates_bar_chart(oss_entity): + """ + This function generates a pygal bar chart for estimated time + of project in months rounded to the nearest tenth. + + estimatedScheduleMonths_low is used for time. + + Arguments: + oss_entity: the OSSEntity to create a graph for. + """ + + bar_chart = pygal.Bar(legend_at_bottom=True) + + if oss_entity.metric_data is not None: + metric_data = oss_entity.metric_data.get('cocomo', {}) + estimated_schedule_months_low = metric_data.get('estimatedScheduleMonths_low', 0) + else: + estimated_schedule_months_low = 0 + + formatted_estimated_months = float(estimated_schedule_months_low or 0.0) + + bar_chart.value_formatter = lambda x: f'{x:,.1f} mos' + + bar_chart.title = 'Estimated Project Time in Months From Constructive Cost Model (COCOMO)' + + bar_chart.add(None, [0]) + bar_chart.add(f'Estimated Time ({formatted_estimated_months:,.1f} mos)', + estimated_schedule_months_low) + bar_chart.add(None, [0]) + + write_repo_chart_to_file(oss_entity, bar_chart, "estimated_project_time") + + +def generate_people_estimate_bar_chart(oss_entity): + """ + This function generates a pygal bar chart for estimated people + working on the project rounded to the nearest integer. + + estimatedPeople_low is used for contributors. + + Arguments: + oss_entity: the OSSEntity to create a graph for. 
+ """ + + bar_chart = pygal.Bar(legend_at_bottom=True) + + if oss_entity.metric_data is not None: + metric_data = oss_entity.metric_data.get('cocomo', {}) + estimated_people_low = metric_data.get('estimatedPeople_low', 0) + else: + estimated_people_low = 0 + + bar_chart.value_formatter = lambda x: f'{x:,.0f} ppl' + + bar_chart.title = 'Estimated Individual Project Contributors From Constructive Cost Model (COCOMO)' + + bar_chart.add(None, [0]) + bar_chart.add(f'Estimated Contributors ({estimated_people_low:,.0f} ppl)', estimated_people_low) + bar_chart.add(None, [0]) + + write_repo_chart_to_file(oss_entity, bar_chart, "estimated_people_contributing") + +def generate_average_issue_resolution_graph(oss_entity): + """ + This function generates a pygal gauge chart for average issue resolution time. + + Arguments: + oss_entity: An object containing the metric data. + """ + gauge_graph = pygal.Gauge(legend_at_bottom=True) + + metric_data = oss_entity.metric_data.get('average_issue_resolution_time') + if not metric_data or not metric_data[0]: + print("No data available for average issue resolution time") + return + + data = metric_data[0] + repo_name = data[0] + average_time_str = data[1] + + if "days" in average_time_str: + days_str = average_time_str.split(' days ') + days = int(days_str[0]) + else: + print("Average issue resolution time is less than a day") + return + + gauge_graph.range = [0, round((days + 20))] + + gauge_graph.title = f"Average Issue Resolution Time for {repo_name} \n Average Time: {round(days)} days" + gauge_graph.add("Days", round(days)) + + write_repo_chart_to_file(oss_entity, gauge_graph, "average_issue_resolution_time") diff --git a/scripts/metricsLib/constants.py b/scripts/metricsLib/constants.py index 752f7a6016..af1a71ab20 100644 --- a/scripts/metricsLib/constants.py +++ b/scripts/metricsLib/constants.py @@ -6,7 +6,8 @@ from pathlib import Path from enum import Enum -TIMEOUT_IN_SECONDS = 20 +TIMEOUT_IN_SECONDS = 120 +REQUEST_RETRIES = 5 BASE_PATH = os.path.dirname(os.path.abspath(__file__)) # Folder Names to send over our projects tracked data PATH_TO_METRICS_DATA = (Path(__file__).parent / diff --git a/scripts/metricsLib/metrics_data_structures.py b/scripts/metricsLib/metrics_data_structures.py index a607034da7..c2d67fcbb9 100644 --- a/scripts/metricsLib/metrics_data_structures.py +++ b/scripts/metricsLib/metrics_data_structures.py @@ -4,10 +4,11 @@ import json from json.decoder import JSONDecodeError import datetime +from time import sleep, mktime, gmtime, time, localtime from functools import reduce import operator import requests -from metricsLib.constants import TIMEOUT_IN_SECONDS, GH_GQL_ENDPOINT +from metricsLib.constants import TIMEOUT_IN_SECONDS, GH_GQL_ENDPOINT, REQUEST_RETRIES # Simple metric that can be represented by a count or value. 
@@ -75,25 +76,52 @@ def hit_metric(self, params=None):
         endpoint_to_hit = self.url.format(**params)
         request_params = None
 
-        if self.headers:
-            _args_ = (self.method, endpoint_to_hit)
-            _kwargs_ = {
-                "params": request_params,
-                "headers": self.headers,
-                "timeout": TIMEOUT_IN_SECONDS
-            }
-            response = requests.request(*_args_, **_kwargs_)
-        else:
-            response = requests.request(
-                self.method, endpoint_to_hit, params=request_params, timeout=TIMEOUT_IN_SECONDS)
-
-        try:
-            if response.status_code == 200:
-                response_json = json.loads(response.text)
+        attempts = 0
+
+        while attempts < REQUEST_RETRIES:
+            if self.headers:
+                _args_ = (self.method, endpoint_to_hit)
+                _kwargs_ = {
+                    "params": request_params,
+                    "headers": self.headers,
+                    "timeout": TIMEOUT_IN_SECONDS
+                }
+                response = requests.request(*_args_, **_kwargs_)
             else:
-                raise ConnectionError(f"Non valid status code {response.status_code}!")
-        except JSONDecodeError:
-            response_json = {}
+                response = requests.request(
+                    self.method, endpoint_to_hit, params=request_params, timeout=TIMEOUT_IN_SECONDS)
+
+            try:
+                if response.status_code == 200:
+                    response_json = json.loads(response.text)
+                    break
+                elif response.status_code in (403, 429):
+                    #Rate limit was triggered.
+                    wait_until = int(response.headers.get("x-ratelimit-reset"))
+                    wait_in_seconds = int(
+                        mktime(gmtime(wait_until)) -
+                        mktime(gmtime(time()))
+                    )
+                    wait_until_time = localtime(wait_until)
+
+                    print(f"Ran into rate limit for {self.name}, sleeping!")
+                    print(
+                        f"sleeping until {wait_until_time.tm_hour}:{wait_until_time.tm_min} ({wait_in_seconds} seconds)"
+                    )
+                    sleep(wait_in_seconds)
+
+                    response_json = {}
+                    attempts += 1
+
+                    if attempts >= REQUEST_RETRIES:
+                        raise ConnectionError(
+                            f"Rate limit was reached and couldn't be rectified after {attempts} tries"
+                        )
+                else:
+                    raise ConnectionError(f"Non valid status code {response.status_code}!")
+            except JSONDecodeError:
+                response_json = {}
+                attempts += 1
 
         return response_json
@@ -336,6 +364,9 @@ def get_values(self, params=None):
             # EX: storing the date and count of each time the amount of followers
             # increased.
             try:
+                #Only continue if the api_label is a list
+                if not isinstance(api_label, list):
+                    raise TypeError
                 list(api_label)
 
                 # initialize each label as an empty list
diff --git a/scripts/metricsLib/metrics_definitions.py b/scripts/metricsLib/metrics_definitions.py
index 2fac3ea936..67331f702d 100644
--- a/scripts/metricsLib/metrics_definitions.py
+++ b/scripts/metricsLib/metrics_definitions.py
@@ -141,6 +141,14 @@
                                "/repo-groups/{repo_group_id}/top-committers",
                               {"top_committers": ["email", "commits"]}))
 
+ORG_METRICS.append(ListMetric("orgLibyears", ["repo_group_id"],
+                              AUGUR_HOST +
+                              "/repo-groups/{repo_group_id}/libyear",
+                              {"dependency_libyear_list": [
+                                  "repo_name", "name", "libyear", "most_recent_collection"
+                              ]
+                              }))
+
 CONTRIBS_LABEL_LAST_MONTH = "new_commit_contributors_by_day_over_last_month"
 
 PERIODIC_METRICS.append(ListMetric("newContributorsofCommitsWeekly",
@@ -176,7 +184,6 @@
                                        AUGUR_HOST + "/pull_request_reports/PR_time_to_first_response/" +
                                        "?repo_id={repo_id}&start_date={begin_month}&end_date={end_date}"))
 
-
 ORG_GITHUB_GRAPHQL_QUERY = """
 query ($org_login: String!)
 {
   organization(login: $org_login) {
@@ -231,3 +238,25 @@
 NADIA_ENDPOINT = AUGUR_HOST + "/repos/{repo_id}/nadia-project-labeling-badge/"
 ADVANCED_METRICS.append(CustomMetric("getNadiaBadgeURL",[ "repo_id"],NADIA_ENDPOINT,
                                      parse_nadia_label_into_badge))
+
+REPO_LIBYEAR_ENDPOINT = AUGUR_HOST + "/repo-groups/{repo_group_id}/repos/{repo_id}/libyear"
+ADVANCED_METRICS.append(ListMetric(
+    "repoLibyears",
+    ["repo_group_id", "repo_id"],
+    REPO_LIBYEAR_ENDPOINT,
+    {
+        "repo_dependency_libyear_list": [
+            "name", "libyear", "most_recent_collection"
+        ]
+    }
+    )
+)
+
+SIMPLE_METRICS.append(ListMetric("averageIssueResolutionTime", sixMonthsParams,
+                                 AUGUR_HOST + "/repos/" + "{repo_id}" + "/average-issue-resolution-time",
+                                 {"average_issue_resolution_time": [
+                                     "repo_name", "avg_issue_resolution_time"
+                                 ]}))
+
+# Metric for Average Commit Counts per PR
+# TODO: Currently not working because of something wrong on Augur's end.
+# Develop a solution here (hacky) or fix upstream.
+
+# RESOURCE_METRICS.append(ResourceMetric("averageCommitsPerPR", sixMonthsParams,
+#                                        AUGUR_HOST + "/pull_request_reports/average_commits_per_PR/" +
+#                                        "?repo_id={repo_id}&start_date={begin_month}&end_date={end_date}"))
diff --git a/templates/org_report_template.md b/templates/org_report_template.md
index 9211d31f56..6d2e04e97e 100644
--- a/templates/org_report_template.md
+++ b/templates/org_report_template.md
@@ -111,5 +111,7 @@ date_stampLastWeek: {date_stamp}
 {{% render "graph-section" baseurl: site.baseurl, path: "/{repo_owner}/{repo_owner}_new_issues_by_day_over_last_six_months.svg", title: "New Issues over Last 6 Months" %}}
 
 {{% render "graph-section" baseurl: site.baseurl, path: "/{repo_owner}/{repo_owner}_top_committers.svg", title: "Top Committers" %}}
+
+{{% render "graph-section" baseurl: site.baseurl, path: "/{repo_owner}/{repo_owner}_libyear_timeline.svg", title: "Dependency Libyears" %}}
\ No newline at end of file
diff --git a/templates/repo_report_template.md b/templates/repo_report_template.md
index 13e928227d..67ee1c7cd5 100644
--- a/templates/repo_report_template.md
+++ b/templates/repo_report_template.md
@@ -111,4 +111,20 @@ date_stampLastWeek: {date_stamp}
 {{% assign optionsArray = '1 Month, 6 Month' | split: ',' %}}
 {{% assign graphsArray = '/{repo_owner}/{repo_name}/new_commit_contributors_by_day_over_last_month_{repo_name}_data.svg, /{repo_owner}/{repo_name}/new_commit_contributors_by_day_over_last_six_months_{repo_name}_data.svg' | split: ',' %}}
 {{% render "graph-toggle", baseurl: site.baseurl, name: "new-contributors" options: optionsArray, graphs: graphsArray, title: "Number of Contributors Joining per Interval" %}}
+
+{{% assign optionsArray = 'Summary, Predominant' | split: ',' %}}
+{{% assign graphsArray = "/{repo_owner}/{repo_name}/language_summary_{repo_name}_data.svg, /{repo_owner}/{repo_name}/predominant_langs_{repo_name}_data.svg" | split: ',' %}}
+{{% render "graph-toggle" baseurl: site.baseurl, name: "language-information" options: optionsArray, graphs: graphsArray, title: "Language Information" %}}
+
+{{% render "graph-section" baseurl: site.baseurl, path: "/{repo_owner}/{repo_name}/average_issue_resolution_time_{repo_name}_data.svg", title: "Average Issue Resolution Time" %}}
+
+{{% render "graph-section" baseurl: site.baseurl, path: "/{repo_owner}/{repo_name}/libyear_timeline_{repo_name}_data.svg", title: "Dependency Libyears" %}}
+
+{{% render "graph-section" baseurl: site.baseurl, path: "/{repo_owner}/{repo_name}/DRYness_{repo_name}_data.svg", title: "DRYness Percentage Graph" %}}
+
render "graph-section" baseurl: site.baseurl, path: "/{repo_owner}/{repo_name}/estimated_project_costs_{repo_name}_data.svg", title: "Estimated Costs" %}} + + {{% render "graph-section" baseurl: site.baseurl, path: "/{repo_owner}/{repo_name}/estimated_project_time_{repo_name}_data.svg", title: "Estimated Time" %}} + + {{% render "graph-section" baseurl: site.baseurl, path: "/{repo_owner}/{repo_name}/estimated_people_contributing_{repo_name}_data.svg", title: "Estimated Individual Contributors" %}} \ No newline at end of file