diff --git a/djangobench/benchmarks/query_evaluating/benchmark.py b/djangobench/benchmarks/query_evaluating/benchmark.py
new file mode 100644
index 0000000..e739256
--- /dev/null
+++ b/djangobench/benchmarks/query_evaluating/benchmark.py
@@ -0,0 +1,24 @@
+from djangobench.utils import run_benchmark
+
+
+def benchmark():
+    global MultiField
+    list(MultiField.objects.raw('select id from query_evaluating_multifield'))
+
+def setup():
+    global MultiField
+    from query_evaluating.models import MultiField
+    for i in range(1000):
+        kwargs = {}
+        for j in range(1, 11):
+            kwargs['field%s' % j] = 'foobar_%s_%s' % (i, j)
+        MultiField(**kwargs).save()
+
+run_benchmark(
+    benchmark,
+    setup=setup,
+    meta={
+        'Description': 'Evaluation of a raw SQL query over 1000 rows of a ten-CharField model.',
+    }
+)
+
diff --git a/djangobench/benchmarks/query_evaluating/models.py b/djangobench/benchmarks/query_evaluating/models.py
new file mode 100644
index 0000000..0012b7d
--- /dev/null
+++ b/djangobench/benchmarks/query_evaluating/models.py
@@ -0,0 +1,14 @@
+from django.db import models
+
+
+class MultiField(models.Model):
+    field1 = models.CharField(max_length=100)
+    field2 = models.CharField(max_length=100)
+    field3 = models.CharField(max_length=100)
+    field4 = models.CharField(max_length=100)
+    field5 = models.CharField(max_length=100)
+    field6 = models.CharField(max_length=100)
+    field7 = models.CharField(max_length=100)
+    field8 = models.CharField(max_length=100)
+    field9 = models.CharField(max_length=100)
+    field10 = models.CharField(max_length=100)
diff --git a/djangobench/benchmarks/query_evaluating/settings.py b/djangobench/benchmarks/query_evaluating/settings.py
new file mode 100644
index 0000000..a212b67
--- /dev/null
+++ b/djangobench/benchmarks/query_evaluating/settings.py
@@ -0,0 +1,3 @@
+from djangobench.base_settings import *  # NOQA
+
+INSTALLED_APPS = ['query_evaluating']