From 9385645de58c862ea8c0f4127ae0178fa4e945fb Mon Sep 17 00:00:00 2001 From: MVarshini Date: Sat, 9 Nov 2024 17:19:02 +0530 Subject: [PATCH] pagination params --- backend/app/api/v1/commons/hce.py | 11 ++- backend/app/api/v1/commons/ocm.py | 10 +- backend/app/api/v1/commons/ocp.py | 1 - backend/app/api/v1/commons/quay.py | 1 - backend/app/api/v1/commons/telco.py | 4 +- backend/app/api/v1/commons/utils.py | 2 +- backend/app/api/v1/endpoints/cpt/cptJobs.py | 63 ++++++++++-- backend/app/api/v1/endpoints/cpt/maps/hce.py | 41 ++++---- backend/app/api/v1/endpoints/cpt/maps/ocm.py | 25 ++--- backend/app/api/v1/endpoints/cpt/maps/ocp.py | 24 +++-- backend/app/api/v1/endpoints/cpt/maps/quay.py | 27 +++--- .../app/api/v1/endpoints/cpt/maps/telco.py | 23 +++-- backend/app/api/v1/endpoints/ocm/ocmJobs.py | 23 ++++- backend/app/api/v1/endpoints/ocp/graph.py | 8 +- .../app/api/v1/endpoints/telco/telcoJobs.py | 10 +- backend/app/services/search.py | 21 +--- backend/app/services/splunk.py | 25 ++++- frontend/src/actions/homeActions.js | 95 ++++++++++--------- frontend/src/actions/paginationActions.js | 71 ++++++++------ frontend/src/actions/telcoActions.js | 2 +- .../organisms/LoadingComponent/index.less | 26 ++--- .../components/organisms/Pagination/index.jsx | 8 +- .../src/components/templates/Home/index.jsx | 4 +- frontend/src/reducers/homeReducer.js | 3 + 24 files changed, 318 insertions(+), 210 deletions(-) diff --git a/backend/app/api/v1/commons/hce.py b/backend/app/api/v1/commons/hce.py index 701f0c2e..e6f24123 100644 --- a/backend/app/api/v1/commons/hce.py +++ b/backend/app/api/v1/commons/hce.py @@ -3,8 +3,10 @@ from app.services.search import ElasticService -async def getData(start_datetime: date, end_datetime: date, configpath: str): +async def getData(start_datetime: date, end_datetime: date, size:int, offset:int, configpath: str): query = { + "size": size, + "from": offset, "query": { "bool": { "filter": { @@ -17,14 +19,13 @@ async def getData(start_datetime: date, end_datetime: date, configpath: str): } } } - es = ElasticService(configpath=configpath) - response = await es.post(query=query, start_date=start_datetime, end_date=end_datetime, timestamp_field='date') + response = await es.post(query=query, size=size, start_date=start_datetime, end_date=end_datetime, timestamp_field='date') await es.close() - tasks = [item['_source'] for item in response] + tasks = [item['_source'] for item in response['data']] jobs = pd.json_normalize(tasks) jobs[['group']] = jobs[['group']].fillna(0) jobs.fillna('', inplace=True) if len(jobs) == 0: return jobs - return jobs + return ({'data':jobs, 'total': response['total']}) diff --git a/backend/app/api/v1/commons/ocm.py b/backend/app/api/v1/commons/ocm.py index 25870789..f8c77181 100644 --- a/backend/app/api/v1/commons/ocm.py +++ b/backend/app/api/v1/commons/ocm.py @@ -3,8 +3,10 @@ from app.services.search import ElasticService -async def getData(start_datetime: date, end_datetime: date, configpath: str): +async def getData(start_datetime: date, end_datetime: date, size:int, offset:int, configpath: str): query = { + "size": size, + "from": offset, "query": { "bool": { "filter": { @@ -19,9 +21,9 @@ async def getData(start_datetime: date, end_datetime: date, configpath: str): } es = ElasticService(configpath=configpath) - response = await es.post(query=query, start_date=start_datetime, end_date=end_datetime, timestamp_field='metrics.earliest') + response = await es.post(query=query, size=size, start_date=start_datetime, end_date=end_datetime, 
timestamp_field='metrics.earliest') await es.close() - tasks = [item['_source'] for item in response] + tasks = [item['_source'] for item in response['data']] jobs = pd.json_normalize(tasks) if len(jobs) == 0: return jobs @@ -32,7 +34,7 @@ async def getData(start_datetime: date, end_datetime: date, configpath: str): jobs.insert(len(jobs.columns), "ciSystem", "") jobs.fillna('', inplace=True) jobs['jobStatus'] = jobs.apply(convertJobStatus, axis=1) - return jobs + return {"data":jobs,"total": response["total"]} def fillCiSystem(row): diff --git a/backend/app/api/v1/commons/ocp.py b/backend/app/api/v1/commons/ocp.py index 9e496474..e1f1db93 100644 --- a/backend/app/api/v1/commons/ocp.py +++ b/backend/app/api/v1/commons/ocp.py @@ -28,7 +28,6 @@ async def getData(start_datetime: date, end_datetime: date, size:int, offset:int jobs = pd.json_normalize(tasks) if len(jobs) == 0: return jobs - jobs[['masterNodesCount', 'workerNodesCount', 'infraNodesCount', 'totalNodesCount']] = jobs[['masterNodesCount', 'workerNodesCount', 'infraNodesCount', 'totalNodesCount']].fillna(0) jobs.fillna('', inplace=True) diff --git a/backend/app/api/v1/commons/quay.py b/backend/app/api/v1/commons/quay.py index b09f1516..79e7b914 100644 --- a/backend/app/api/v1/commons/quay.py +++ b/backend/app/api/v1/commons/quay.py @@ -41,5 +41,4 @@ async def getData(start_datetime: date, end_datetime: date, size, offset, config cleanJobs = jobs[jobs['platform'] != ""] jbs = cleanJobs - return ({'data':jbs, 'total': response['total']}) diff --git a/backend/app/api/v1/commons/telco.py b/backend/app/api/v1/commons/telco.py index 302658d8..a3f0820b 100644 --- a/backend/app/api/v1/commons/telco.py +++ b/backend/app/api/v1/commons/telco.py @@ -34,7 +34,7 @@ async def getData(start_datetime: date, end_datetime: date, size: int, offset: i response = await splunk.query(query=query, size=size, offset=offset, searchList=searchList) mapped_list = [] - for each_response in response: + for each_response in response['data']: end_timestamp = int(each_response['timestamp']) test_data = each_response['data'] threshold = await telcoGraphs.process_json(test_data, True) @@ -68,4 +68,4 @@ async def getData(start_datetime: date, end_datetime: date, size: int, offset: i if len(jobs) == 0: return jobs - return jobs + return {'data':jobs, 'total':response['total']} diff --git a/backend/app/api/v1/commons/utils.py b/backend/app/api/v1/commons/utils.py index 6985e9e7..38b39a74 100644 --- a/backend/app/api/v1/commons/utils.py +++ b/backend/app/api/v1/commons/utils.py @@ -13,7 +13,7 @@ async def getMetadata(uuid: str, configpath: str) : es = ElasticService(configpath=configpath) response = await es.post(query=query) await es.close() - meta = [item['_source'] for item in response] + meta = [item['_source'] for item in response['data']] return meta[0] def updateStatus(job): diff --git a/backend/app/api/v1/endpoints/cpt/cptJobs.py b/backend/app/api/v1/endpoints/cpt/cptJobs.py index eeb92d40..b9f27101 100644 --- a/backend/app/api/v1/endpoints/cpt/cptJobs.py +++ b/backend/app/api/v1/endpoints/cpt/cptJobs.py @@ -39,7 +39,9 @@ async def jobs(start_date: date = Query(None, description="Start date for search end_date: date = Query(None, description="End date for searching jobs, format: 'YYYY-MM-DD'", examples=["2020-11-15"]), pretty: bool = Query(False, description="Output contet in pretty format."), size: int = Query(None, description="Number of jobs to fetch"), - offset: int = Query(None, description="Offset Number to fetch jobs from"),): + offset: int = Query(None, 
description="Offset Number to fetch jobs from"), + totalJobs: int = Query(None, description="Offset Number to fetch jobs from") + ): if start_date is None: start_date = datetime.utcnow().date() start_date = start_date - timedelta(days=5) @@ -51,20 +53,31 @@ async def jobs(start_date: date = Query(None, description="Start date for search return Response(content=json.dumps({'error': "invalid date format, start_date must be less than end_date"}), status_code=422) results_df = pd.DataFrame() + total_dict = {} + total = 0 with ProcessPoolExecutor(max_workers=cpu_count()) as executor: - futures = {executor.submit(fetch_product, product, start_date, end_date): product for product in products} + futures = {executor.submit(fetch_product, product, start_date, end_date, size, offset): product for product in products} for future in as_completed(futures): product = futures[future] try: result = future.result() - results_df = pd.concat([results_df, result]) + total_dict[product] = result['total'] + results_df = pd.concat([results_df, result['data']]) except Exception as e: print(f"Error fetching data for product {product}: {e}") - + + num = 0 if totalJobs is None else int(totalJobs) + if totalJobs == 0: + for product in total_dict: + total += int(total_dict[product]) + + totalJobs =max(total,num) response = { 'startDate': start_date.__str__(), 'endDate': end_date.__str__(), - 'results': results_df.to_dict('records') + 'results': results_df.to_dict('records'), + 'total': totalJobs, + 'offset': offset + size } if pretty: @@ -75,10 +88,12 @@ async def jobs(start_date: date = Query(None, description="Start date for search return jsonstring -async def fetch_product_async(product, start_date, end_date): +async def fetch_product_async(product, start_date, end_date, size, offset): try: - df = await products[product](start_date, end_date) - return df.loc[:, ["ciSystem", "uuid", "releaseStream", "jobStatus", "buildUrl", "startDate", "endDate", "product", "version", "testName"]] if len(df) != 0 else df + response = await products[product](start_date, end_date, size, offset) + if response: + df = response["data"] + return {"data":df.loc[:, ["ciSystem", "uuid", "releaseStream", "jobStatus", "buildUrl", "startDate", "endDate", "product", "version", "testName"]] if len(df) != 0 else df, "total":response["total"]} except ConnectionError: print("Connection Error in mapper for product " + product) except Exception as e: @@ -86,5 +101,33 @@ async def fetch_product_async(product, start_date, end_date): return pd.DataFrame() -def fetch_product(product, start_date, end_date): - return asyncio.run(fetch_product_async(product, start_date, end_date)) \ No newline at end of file +def fetch_product(product, start_date, end_date, size, offset): + return asyncio.run(fetch_product_async(product, start_date, end_date, size, offset)) + +def is_requested_size_available(total_count, offset, requested_size): + """ + Check if the requested size of data is available starting from a given offset. + + Args: + total_count (int): Total number of available records. + offset (int): The starting position in the dataset. + requested_size (int): The number of records requested. + + Returns: + bool: True if the requested size is available, False otherwise. + """ + return (offset + requested_size) <= total_count +def calculate_remaining_data(total_count, offset, requested_size): + """ + Calculate the remaining number of data items that can be fetched based on the requested size. + + Args: + total_count (int): Total number of available records. 
diff --git a/backend/app/api/v1/endpoints/cpt/maps/hce.py b/backend/app/api/v1/endpoints/cpt/maps/hce.py
index c4d6fa17..0dd1bc1e 100644
--- a/backend/app/api/v1/endpoints/cpt/maps/hce.py
+++ b/backend/app/api/v1/endpoints/cpt/maps/hce.py
@@ -2,8 +2,8 @@
 from datetime import date
 
 ################################################################
-# This will return a DataFrame from HCE required by the CPT
-# endpoint, it contians the following columns:
+# This will return a dictionary from HCE required by the CPT
+# endpoint; it contains totalJobs and a DataFrame with the following columns:
 #   "ciSystem"
 #   "uuid"
 #   "releaseStream"
@@ -15,23 +15,26 @@
 #   "version"
 #   "testName"
 ################################################################
-async def hceMapper(start_datetime: date, end_datetime: date):
-    df = await getData(start_datetime, end_datetime, f'hce.elasticsearch')
-    if len(df) == 0:
-        return df
-    df["releaseStream"] = "Nightly"
-    df["ciSystem"] = "Jenkins"
-    df["testName"] = df["product"] + ":" + df["test"]
-    df["product"] = df["group"]
-    df["jobStatus"] = df['result'].apply(lambda x: "SUCCESS" if x == 'PASS' else "FAILURE")
-    df["version"] = df['version'].apply(lambda x: x if len(x.split(":")) == 1 else x.split(":")[1][:7])
-    df["uuid"] = df["result_id"]
-    df["buildUrl"] = df["link"]
-    df["startDate"] = df["date"]
-    df["endDate"] = df["date"]
-    df = dropColumns(df)
-    return df
-
+import pandas as pd
+async def hceMapper(start_datetime: date, end_datetime: date, size: int, offset: int):
+    response = await getData(start_datetime, end_datetime, size, offset, f'hce.elasticsearch')
+    if not isinstance(response, pd.DataFrame) and response:
+        df = response["data"]
+        if len(df) == 0:
+            return df
+        df["releaseStream"] = "Nightly"
+        df["ciSystem"] = "Jenkins"
+        df["testName"] = df["product"] + ":" + df["test"]
+        df["product"] = df["group"]
+        df["jobStatus"] = df['result'].apply(lambda x: "SUCCESS" if x == 'PASS' else "FAILURE")
+        df["version"] = df['version'].apply(lambda x: x if len(x.split(":")) == 1 else x.split(":")[1][:7])
+        df["uuid"] = df["result_id"]
+        df["buildUrl"] = df["link"]
+        df["startDate"] = df["date"]
+        df["endDate"] = df["date"]
+        df = dropColumns(df)
+        return {"data":df, "total": response["total"]}
+    return {"data":pd.DataFrame(), "total":0}
 def dropColumns(df):
     df = df.drop(columns=["group","test","result","result_id","link","date","release"])
diff --git a/backend/app/api/v1/endpoints/cpt/maps/ocm.py b/backend/app/api/v1/endpoints/cpt/maps/ocm.py
index f53bd13e..18f90435 100644
--- a/backend/app/api/v1/endpoints/cpt/maps/ocm.py
+++ b/backend/app/api/v1/endpoints/cpt/maps/ocm.py
@@ -1,18 +1,21 @@
 from ....commons.ocm import getData
 from datetime import date
-
+import pandas as pd
 ################################################################
 # This will return a DataFrame from OCM required by the CPT endpoint
 ################################################################
-async def ocmMapper(start_datetime: date, end_datetime: date):
-    df = await getData(start_datetime, end_datetime, f'ocm.elasticsearch')
-    if len(df) == 0:
-        return df
-    df.insert(len(df.columns), "product", "ocm")
-    df.insert(len(df.columns), "releaseStream", "Nightly")
-    df["testName"] = df["attack"]
-    df["startDate"] = df["metrics.earliest"]
-    df["endDate"] = df["metrics.end"]
+async def ocmMapper(start_datetime: date, end_datetime: date, size:int, offset:int):
+    response = await getData(start_datetime, end_datetime, size, offset, f'ocm.elasticsearch')
+    if not isinstance(response, pd.DataFrame) and response:
+        df = response["data"]
+        if len(df) == 0:
+            return df
+        df.insert(len(df.columns), "product", "ocm")
+        df.insert(len(df.columns), "releaseStream", "Nightly")
+        df["testName"] = df["attack"]
+        df["startDate"] = df["metrics.earliest"]
+        df["endDate"] = df["metrics.end"]
 
-    return df
+        return {"data":df, "total": response["total"]}
+    return {"data":pd.DataFrame(), "total":0}
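
All five product mappers in this patch converge on one return contract: a dict of {"data": DataFrame, "total": int}, with an empty-DataFrame/zero-total fallback when the commons layer hands back nothing usable. A minimal sketch of that shared shape (illustrative only; exampleMapper and the 'example.elasticsearch' config path are placeholders, not part of the patch):

    import pandas as pd

    async def exampleMapper(start_datetime, end_datetime, size, offset):
        response = await getData(start_datetime, end_datetime, size, offset, 'example.elasticsearch')
        # getData may return a bare (possibly empty) DataFrame instead of a dict,
        # so guard before indexing into it
        if not isinstance(response, pd.DataFrame) and response:
            return {"data": response["data"], "total": response["total"]}
        return {"data": pd.DataFrame(), "total": 0}
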
diff --git a/backend/app/api/v1/endpoints/cpt/maps/ocp.py b/backend/app/api/v1/endpoints/cpt/maps/ocp.py
index 69a3649d..a759575f 100644
--- a/backend/app/api/v1/endpoints/cpt/maps/ocp.py
+++ b/backend/app/api/v1/endpoints/cpt/maps/ocp.py
@@ -1,17 +1,21 @@
 from ....commons.ocp import getData
 from ....commons.utils import getReleaseStream
 from datetime import date
-
+import pandas as pd
 ################################################################
 # This will return a DataFrame from OCP required by the CPT endpoint
 ################################################################
-async def ocpMapper(start_datetime: date, end_datetime: date):
-    df = await getData(start_datetime, end_datetime, f'ocp.elasticsearch')
-    if len(df) == 0:
-        return df
-    df.insert(len(df.columns), "product", "ocp")
-    df["releaseStream"] = df.apply(getReleaseStream, axis=1)
-    df["version"] = df["shortVersion"]
-    df["testName"] = df["benchmark"]
-    return df
+async def ocpMapper(start_datetime: date, end_datetime: date, size:int, offset:int):
+    response = await getData(start_datetime, end_datetime, size, offset, f'ocp.elasticsearch')
+    if not isinstance(response, pd.DataFrame) and response:
+        df = response["data"]
+        if len(df) == 0:
+            return df
+        df.insert(len(df.columns), "product", "ocp")
+        df["releaseStream"] = df.apply(getReleaseStream, axis=1)
+        df["version"] = df["shortVersion"]
+        df["testName"] = df["benchmark"]
+        return {"data":df, "total": response["total"]}
+    return {"data":pd.DataFrame(), "total": 0}
\ No newline at end of file
diff --git a/backend/app/api/v1/endpoints/cpt/maps/quay.py b/backend/app/api/v1/endpoints/cpt/maps/quay.py
index 9eea25b1..7e4b312a 100644
--- a/backend/app/api/v1/endpoints/cpt/maps/quay.py
+++ b/backend/app/api/v1/endpoints/cpt/maps/quay.py
@@ -1,15 +1,18 @@
 from ....commons.quay import getData
 from datetime import date
+import pandas as pd
 
-
-#####################################################################
-# This will return a DataFrame from Quay required by the CPT endpoint
-#####################################################################
-async def quayMapper(start_datetime: date, end_datetime: date):
-    df = await getData(start_datetime, end_datetime, f'quay.elasticsearch')
-    if len(df) == 0:
-        return df
-    df.insert(len(df.columns), "product", "quay")
-    df["version"] = df["releaseStream"]
-    df["testName"] = df["benchmark"]
-    return df
+#####################################################################################
+# This will return a DataFrame from Quay required by the CPT endpoint with total jobs
+#####################################################################################
+async def quayMapper(start_datetime: date, end_datetime: date, size:int, offset: int):
+    response = await getData(start_datetime, end_datetime, size, offset, f'quay.elasticsearch')
+    if not isinstance(response, pd.DataFrame) and response:
+        df = response["data"]
+        if len(df) == 0:
+            return df
+        df.insert(len(df.columns), "product", "quay")
+        df["version"] = df["releaseStream"]
+        df["testName"] = df["benchmark"]
+        return {"data":df, "total": response["total"]}
+    return {"data":pd.DataFrame(), "total":0}
diff --git a/backend/app/api/v1/endpoints/cpt/maps/telco.py b/backend/app/api/v1/endpoints/cpt/maps/telco.py
index 51bb2d41..3f595e43 100644
--- a/backend/app/api/v1/endpoints/cpt/maps/telco.py
+++ b/backend/app/api/v1/endpoints/cpt/maps/telco.py
@@ -1,17 +1,20 @@
 from ....commons.telco import getData
 from ....commons.utils import getReleaseStream
 from datetime import date
-
+import pandas as pd
 #####################################################################
 # This will return a DataFrame from Telco required by the CPT endpoint
 #####################################################################
-async def telcoMapper(start_datetime: date, end_datetime: date):
-    df = await getData(start_datetime, end_datetime, f'telco.splunk')
-    if len(df) == 0:
-        return df
-    df.insert(len(df.columns), "product", "telco")
-    df["releaseStream"] = df.apply(getReleaseStream, axis=1)
-    df["version"] = df["shortVersion"]
-    df["testName"] = df["benchmark"]
-    return df
+async def telcoMapper(start_datetime: date, end_datetime: date, size:int, offset: int):
+    response = await getData(start_datetime, end_datetime, size, offset, f'telco.splunk')
+    if not isinstance(response, pd.DataFrame) and response:
+        df = response["data"]
+        if len(df) == 0:
+            return df
+        df.insert(len(df.columns), "product", "telco")
+        df["releaseStream"] = df.apply(getReleaseStream, axis=1)
+        df["version"] = df["shortVersion"]
+        df["testName"] = df["benchmark"]
+        return {"data":df, "total": response["total"]}
+    return {"data":pd.DataFrame(), "total":0}
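
The size/offset guard added to the OCM endpoint below keeps unpaginated callers on the old "fetch everything" path. Its effective values, sketched as a standalone function (the function name is illustrative; 10000 is the fallback page size the patch itself uses):

    def effective_params(size, offset):
        # Mirrors the guard in ocmJobs.py: a missing size disables pagination entirely.
        if not offset:
            offset = 0
        if not size:
            size = 10000
            offset = 0
        return size, offset

    print(effective_params(None, None))  # (10000, 0)  legacy, unpaginated call
    print(effective_params(25, 50))      # (25, 50)    paginated call
    print(effective_params(None, 50))    # (10000, 0)  offset without size is ignored
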
diff --git a/backend/app/api/v1/endpoints/ocm/ocmJobs.py b/backend/app/api/v1/endpoints/ocm/ocmJobs.py
index bc76b7eb..b298130e 100644
--- a/backend/app/api/v1/endpoints/ocm/ocmJobs.py
+++ b/backend/app/api/v1/endpoints/ocm/ocmJobs.py
@@ -21,7 +21,9 @@
 },)
 async def jobs(start_date: date = Query(None, description="Start date for searching jobs, format: 'YYYY-MM-DD'", examples=["2020-11-10"]),
                end_date: date = Query(None, description="End date for searching jobs, format: 'YYYY-MM-DD'", examples=["2020-11-15"]),
-               pretty: bool = Query(False, description="Output contet in pretty format.")):
+               pretty: bool = Query(False, description="Output content in pretty format."),
+               size: int = Query(None, description="Number of jobs to fetch"),
+               offset: int = Query(None, description="Offset Number to fetch jobs from")):
     if start_date is None:
         start_date = datetime.utcnow().date()
         start_date = start_date - timedelta(days=5)
@@ -32,19 +34,30 @@ async def jobs(start_date: date = Query(None, description="Start date for search
     if start_date > end_date:
         return Response(content=json.dumps({'error': "invalid date format, start_date must be less than end_date"}), status_code=422)
 
-    results = await getData(start_date, end_date, 'ocm.elasticsearch')
+    if not offset:
+        offset = 0
+
+    if not size:
+        size = 10000
+        offset = 0
+
+    results = await getData(start_date, end_date, size, offset, 'ocm.elasticsearch')
 
-    if len(results) >= 1:
+    if 'data' in results and len(results['data']) >= 1:
         response = {
             'startDate': start_date.__str__(),
             'endDate': end_date.__str__(),
-            'results': results.to_dict('records')
+            'results': results['data'].to_dict('records'),
+            'total': results['total'],
+            'offset': offset + size
         }
     else:
         response = {
             'startDate': start_date.__str__(),
             'endDate': end_date.__str__(),
-            'results': []
+            'results': [],
+            'total': 0,
+            'offset': 0
         }
 
     if pretty:
diff --git a/backend/app/api/v1/endpoints/ocp/graph.py b/backend/app/api/v1/endpoints/ocp/graph.py
index f3e090a0..f521e4a0 100644
--- a/backend/app/api/v1/endpoints/ocp/graph.py
+++ b/backend/app/api/v1/endpoints/ocp/graph.py
@@ -220,7 +220,7 @@ async def jobSummary(uuids: list):
     es = ElasticService(configpath="ocp.elasticsearch",index=index)
     response = await es.post(query=query)
     await es.close()
-    runs = [item['_source'] for item in response]
+    runs = [item['_source'] for item in response["data"]]
     return runs
 
 async def processBurner(data: dict) :
@@ -346,7 +346,7 @@ async def getBurnerResults(uuid: str, uuids: list, index: str ):
     es = ElasticService(configpath="ocp.elasticsearch",index=index)
     response = await es.post(query=query)
     await es.close()
-    runs = [item['_source'] for item in response]
+    runs = [item['_source'] for item in response["data"]]
     return runs
 
 async def getResults(uuid: str, uuids: list, index: str ):
@@ -366,7 +366,7 @@ async def getResults(uuid: str, uuids: list, index: str ):
     es = ElasticService(configpath="ocp.elasticsearch",index=index)
     response = await es.post(query=query)
     await es.close()
-    runs = [item['_source'] for item in response]
+    runs = [item['_source'] for item in response["data"]]
     return runs
 
 async def getMatchRuns(meta: dict, workerCount: False):
@@ -416,7 +416,7 @@ async def getMatchRuns(meta: dict, workerCount: False):
     es = ElasticService(configpath="ocp.elasticsearch")
     response = await es.post(query=query)
     await es.close()
-    runs = [item['_source'] for item in response]
+    runs = [item['_source'] for item in response["data"]]
     uuids = []
     for run in runs :
         uuids.append(run["uuid"])
diff --git a/backend/app/api/v1/endpoints/telco/telcoJobs.py b/backend/app/api/v1/endpoints/telco/telcoJobs.py
index f46afea5..b2c13a68 100644
--- a/backend/app/api/v1/endpoints/telco/telcoJobs.py
+++ b/backend/app/api/v1/endpoints/telco/telcoJobs.py
@@ -37,17 +37,21 @@ async def jobs(start_date: date = Query(None, description="Start date for search
 
     results = await getData(start_date, end_date, size, offset, 'telco.splunk')
 
-    if len(results) >= 1 :
+    if len(results['data']) >= 1 :
         response = {
             'startDate': start_date.__str__(),
             'endDate': end_date.__str__(),
-            'results': results.to_dict('records')
+            'results': results['data'].to_dict('records'),
+            'total': results['total'],
+            'offset': offset + size
         }
     else :
         response = {
             'startDate': start_date.__str__(),
             'endDate': end_date.__str__(),
-            'results': []
+            'results': [],
+            'total': 0,
+            'offset': 0
         }
 
     if pretty:
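
In the search service below, each Elasticsearch cluster now reports {"data": hits, "total": count}, and the dual-cluster branch merges the two. One caution about the merged-total context lines: Python's conditional expression binds more loosely than +, so combining two "x if cond else fallback" terms safely needs explicit parentheses, as in this sketch:

    prev_results = {"data": [1, 2], "total": 2}
    new_results = {"data": [3], "total": 1}

    total_val = (prev_results["total"] if "total" in prev_results else 0) + \
                (new_results["total"] if "total" in new_results else 0)
    merged = (prev_results["data"] if "data" in prev_results else []) + \
             (new_results["data"] if "data" in new_results else [])
    print(total_val, merged)  # 3 [1, 2, 3]

Without the parentheses, "a if c1 else 0 + b if c2 else 0" evaluates as "a if c1 else ((0 + b) if c2 else 0)", which silently drops one side whenever c1 is true.
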
diff --git a/backend/app/services/search.py b/backend/app/services/search.py
index 4a40aca2..3958a298 100644
--- a/backend/app/services/search.py
+++ b/backend/app/services/search.py
@@ -79,18 +79,13 @@ async def post(self, query, indice=None, size=None, start_date=None, end_date=No
                     index=self.prev_index+"*",
                     body=jsonable_encoder(query),
                     size=size)
-
-                print("hydrogen")
-                previous_results = {"data":response['hits']['hits'], "total":response['hits']["total"]["value"]}
-                print(previous_results)
+                previous_results = {"data":response['hits']['hits'], "total":response['hits']["total"]["value"]}
             else:
                 response = await self.prev_es.search(
                     index=self.prev_index+"*",
                     body=jsonable_encoder(query),
-                    size=size)
-                print("helium")
-                previous_results = {"data":response['hits']['hits'], "total":response['hits']["total"]["value"]}
-                print(previous_results)
+                    size=size)
+                previous_results = {"data":response['hits']['hits'], "total":response['hits']["total"]["value"]}
             # previous_results = await self.scan_indices(self.prev_es, self.prev_index, query, timestamp_field, start_date, new_end_date, size)
         if self.prev_es and self.new_es:
             self.new_index = self.new_index_prefix + (self.new_index if indice is None else indice)
@@ -107,23 +102,17 @@ async def post(self, query, indice=None, size=None, start_date=None, end_date=No
                 response = await self.new_es.search(
                     index=self.new_index+"*",
                     body=jsonable_encoder(query),
-                    size=size)
-                print("lithium")
+                    size=size)
                 new_results = {"data":response['hits']['hits'],"total":response['hits']['total']['value']}
-                print(new_results)
             else:
                 response = await self.new_es.search(
                     index=self.new_index+"*",
                     body=jsonable_encoder(query),
                     size=size)
-                print("bery")
-                new_results = {"data":response['hits']['hits'],"total":response['hits']['total']['value']}
-                print(new_results)
+                new_results = {"data":response['hits']['hits'],"total":response['hits']['total']['value']}
             # new_results = await self.scan_indices(self.new_es, self.new_index, query, timestamp_field, new_start_date, end_date, size)
             unique_data = await self.remove_duplicates(previous_results["data"] if("data" in previous_results) else [] + new_results["data"] if("data" in new_results) else[])
             totalVal = previous_results["total"] if("total" in previous_results) else 0 + new_results["total"] if("total" in new_results) else 0
-            print("boron")
-            print(unique_data)
             return ({"data":unique_data, "total": totalVal})
         else:
             if start_date and end_date:
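
To report a grand total alongside the paginated oneshot search, the Splunk service below runs a second job whose query ends in "| stats count AS total_records". In JSON output mode that job yields a single aggregated row, roughly of the form (field value illustrative):

    {"results": [{"total_records": "1276"}]}

which is why the code reads value[0]['total_records'] after decoding each result chunk.
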
diff --git a/backend/app/services/splunk.py b/backend/app/services/splunk.py
index bffeebef..ae18888f 100644
--- a/backend/app/services/splunk.py
+++ b/backend/app/services/splunk.py
@@ -1,8 +1,7 @@
 import orjson
 
 from app import config
 from splunklib import client, results
-
-
+import json
 class SplunkService:
     """
     Class to integrate splunk python client
@@ -40,16 +39,34 @@ async def query(self, query, searchList='', size=None, offset=None, max_results=
         query (string): splunk query
         OPTIONAL: searchList (string): additional query parameters for index
         """
-        query["count"] = size
+        query["count"] = size
         query["offset"] = offset
 
         # If additional search parameters are provided, include those in searchindex
         searchindex = "search index={} {}".format(self.indice, searchList) if searchList else "search index={}".format(self.indice)
+
+        search_query = "search index={} {} | stats count AS total_records".format(self.indice, searchList) if searchList else "search index={} | stats count AS total_records".format(self.indice)
+
         try:
+            # Run the job and retrieve results
+            job = self.service.jobs.create(search_query, exec_mode="normal", earliest_time=query["earliest_time"], latest_time=query["latest_time"])
+
+            # Wait for the job to finish
+            while not job.is_done():
+                job.refresh()
+
             oneshotsearch_results = self.service.jobs.oneshot(searchindex, **query)
         except Exception as e:
             print('Error querying splunk: {}'.format(e))
             return None
+
+        # Fetch the total record count; default to 0 if the stats job yields no rows
+        total_records = 0
+        for result in job.results(output_mode="json"):
+            decoded_data = json.loads(result.decode('utf-8'))
+            value = decoded_data.get("results")
+            total_records = value[0]['total_records']
 
         # Get the results and display them using the JSONResultsReader
         res_array = []
@@ -67,7 +84,7 @@ async def query(self, query, searchList='', size=None, offset=None, max_results=
             except Exception as e:
                 print(f'Error on including Splunk record query in results array: {e}')
 
-        return res_array
+        return {'data':res_array, 'total':total_records}
 
     async def _stream_results(self, oneshotsearch_results):
         for record in results.JSONResultsReader(oneshotsearch_results):
diff --git a/frontend/src/actions/homeActions.js b/frontend/src/actions/homeActions.js
index 30e45194..303c621a 100644
--- a/frontend/src/actions/homeActions.js
+++ b/frontend/src/actions/homeActions.js
@@ -16,49 +16,55 @@ import API from "@/utils/axiosInstance";
 import { cloneDeep } from "lodash";
 import { showFailureToast } from "@/actions/toastActions";
 
-export const fetchOCPJobsData = () => async (dispatch) => {
-  try {
-    dispatch({ type: TYPES.LOADING });
-
-    const params = dispatch(getRequestParams("cpt"));
-
-    const response = await API.get(API_ROUTES.CPT_JOBS_API_V1, { params });
-    if (response.status === 200) {
-      const startDate = response.data.startDate,
-        endDate = response.data.endDate;
-      //on initial load startDate and endDate are empty, so from response append to url
-      appendDateFilter(startDate, endDate);
-      dispatch({
-        type: TYPES.SET_CPT_DATE_FILTER,
-        payload: {
-          start_date: startDate,
-          end_date: endDate,
-        },
-      });
+export const fetchOCPJobsData =
+  (isNewSearch = false) =>
+  async (dispatch, getState) => {
+    try {
+      dispatch({ type: TYPES.LOADING });
+
+      const params = dispatch(getRequestParams("cpt"));
+      const results = getState().cpt.results;
+      params["totalJobs"] = getState().cpt.totalJobs;
+      const response = await API.get(API_ROUTES.CPT_JOBS_API_V1, { params });
+      if (response.status === 200) {
+        const startDate = response.data.startDate,
+          endDate = response.data.endDate;
+        //on initial load startDate and endDate are empty, so from response append to url
+        appendDateFilter(startDate, endDate);
+        dispatch({
+          type: TYPES.SET_CPT_DATE_FILTER,
+          payload: {
+            start_date: startDate,
+            end_date: endDate,
+          },
+        });
+      }
+
+      if (response?.data?.results?.length > 0) {
+        dispatch({
+          type: TYPES.SET_CPT_JOBS_DATA,
+          payload: isNewSearch
+            ? response.data.results
+            : [...results, ...response.data.results],
+        });
+        dispatch({
+          type: TYPES.SET_CPT_PAGE_TOTAL,
+          payload: {
+            total: response.data.total,
+            offset: response.data.offset,
+            // currProd: response.data.currProd,
+          },
+        });
+
+        dispatch(applyFilters());
+        dispatch(sortTable("cpt"));
+        dispatch(tableReCalcValues());
+      }
+    } catch (error) {
+      dispatch(showFailureToast());
     }
-
-    if (response?.data?.results?.length > 0) {
-      dispatch({
-        type: TYPES.SET_CPT_JOBS_DATA,
-        payload: response.data.results,
-      });
-      dispatch({
-        type: TYPES.SET_CPT_PAGE_TOTAL,
-        payload: {
-          total: response.data.total,
-          offset: response.data.offset,
-        },
-      });
-
-      dispatch(applyFilters());
-      dispatch(sortTable("cpt"));
-      dispatch(tableReCalcValues());
-    }
-  } catch (error) {
-    dispatch(showFailureToast());
-  }
-  dispatch({ type: TYPES.COMPLETED });
-};
+    dispatch({ type: TYPES.COMPLETED });
+  };
 
 export const setCPTSortIndex = (index) => ({
   type: TYPES.SET_CPT_SORT_INDEX,
@@ -206,7 +212,7 @@ export const setCPTDateFilter =
 
     appendQueryString({ ...appliedFilters, start_date, end_date }, navigate);
 
-    dispatch(fetchOCPJobsData());
+    // dispatch(fetchOCPJobsData());
   };
 
 export const setCPTPage = (pageNo) => ({
@@ -233,4 +239,7 @@ export const tableReCalcValues = () => (dispatch, getState) => {
   const { page, perPage } = getState().cpt;
   dispatch(getCPTSummary());
   dispatch(setCPTPageOptions(page, perPage));
+  const startIdx = page !== 0 ? (page - 1) * perPage : 0;
+  const endIdx = startIdx + perPage - 1;
+  dispatch(sliceCPTTableRows(startIdx, endIdx));
 };
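
On the frontend, CPT results now accumulate in the store across fetches and the visible page is sliced locally. The index arithmetic used by tableReCalcValues above and checkTableData below, restated as a small Python sketch (function name illustrative; endIdx is inclusive, matching the JS helpers):

    def page_bounds(page, per_page):
        start = (page - 1) * per_page if page != 0 else 0
        return start, start + per_page - 1

    print(page_bounds(1, 25))  # (0, 24)
    print(page_bounds(3, 25))  # (50, 74)
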
diff --git a/frontend/src/actions/paginationActions.js b/frontend/src/actions/paginationActions.js
index f2cd24e7..09416cc8 100644
--- a/frontend/src/actions/paginationActions.js
+++ b/frontend/src/actions/paginationActions.js
@@ -6,9 +6,9 @@ import {
 } from "./ocpActions";
 import {
   fetchOCPJobsData,
-  setCPTOffset,
   setCPTPage,
   setCPTPageOptions,
+  sliceCPTTableRows,
 } from "./homeActions";
 import {
   fetchQuayJobsData,
@@ -24,27 +24,23 @@ import {
 } from "./telcoActions";
 
 export const setPage = (newPage, currType) => (dispatch) => {
-  if (currType === "cpt") {
-    dispatch(setCPTPage(newPage));
-  } else if (currType === "ocp") {
-    dispatch(setOCPPage(newPage));
-  } else if (currType === "quay") {
-    dispatch(setQuayPage(newPage));
-  } else if (currType === "telco") {
-    dispatch(setTelcoPage(newPage));
-  }
+  const actions = {
+    cpt: setCPTPage,
+    ocp: setOCPPage,
+    quay: setQuayPage,
+    telco: setTelcoPage,
+  };
+  dispatch(actions[currType](newPage));
 };
 
 export const setPageOptions = (newPage, newPerPage, currType) => (dispatch) => {
-  if (currType === "cpt") {
-    dispatch(setCPTPageOptions(newPage, newPerPage));
-  } else if (currType === "ocp") {
-    dispatch(setOCPPageOptions(newPage, newPerPage));
-  } else if (currType === "quay") {
-    dispatch(setQuayPageOptions(newPage, newPerPage));
-  } else if (currType === "telco") {
-    dispatch(setTelcoPageOptions(newPage, newPerPage));
-  }
+  const actions = {
+    cpt: setCPTPageOptions,
+    ocp: setOCPPageOptions,
+    quay: setQuayPageOptions,
+    telco: setTelcoPageOptions,
+  };
+  dispatch(actions[currType](newPage, newPerPage));
 };
 
 const calculateOffset = (pageNumber, itemsPerPage) => {
@@ -52,22 +48,35 @@ const calculateOffset = (pageNumber, itemsPerPage) => {
 };
 
 export const checkTableData = (newPage, currType) => (dispatch, getState) => {
-  const { results, totalJobs, perPage } = getState()[currType];
+  const { results, totalJobs, perPage, page } = getState()[currType];
+  const fetchActions = {
+    ocp: fetchOCPJobs,
+    quay: fetchQuayJobsData,
+    telco: fetchTelcoJobsData,
+  };
+  const offsetActions = {
+    ocp: setOCPOffset,
+    quay: setQuayOffset,
+    telco: setTelcoOffset,
+  };
 
   const hasPageData = results.length >= newPage * perPage;
   const offset = calculateOffset(newPage, perPage);
   if (results.length < totalJobs && !hasPageData) {
     if (currType === "cpt") {
-      dispatch(setCPTOffset(offset));
-      dispatch(fetchOCPJobsData());
-    } else if (currType === "ocp") {
-      dispatch(setOCPOffset(offset));
-      dispatch(fetchOCPJobs());
-    } else if (currType === "quay") {
-      dispatch(setQuayOffset(offset));
-      dispatch(fetchQuayJobsData());
-    } else if (currType === "telco") {
-      dispatch(setTelcoOffset(offset));
-      dispatch(fetchTelcoJobsData());
+      const startIdx = (page - 1) * perPage;
+      const endIdx = startIdx + perPage - 1;
+      if (results[startIdx] === undefined || results[endIdx] === undefined) {
+        dispatch(fetchOCPJobsData());
+      }
+    } else {
+      dispatch(offsetActions[currType](offset));
+      dispatch(fetchActions[currType]());
+    }
+  } else {
+    if (currType === "cpt") {
+      const startIdx = (page - 1) * perPage;
+      const endIdx = startIdx + perPage - 1;
+      dispatch(sliceCPTTableRows(startIdx, endIdx));
     }
   }
 };
diff --git a/frontend/src/actions/telcoActions.js b/frontend/src/actions/telcoActions.js
index 9b9c821b..59475d51 100644
--- a/frontend/src/actions/telcoActions.js
+++ b/frontend/src/actions/telcoActions.js
@@ -195,7 +195,7 @@ export const setTelcoDateFilter
appendQueryString({ ...appliedFilters, start_date, end_date }, navigate); - dispatch(fetchTelcoJobsData()); + //dispatch(fetchTelcoJobsData()); }; export const getTelcoSummary = () => (dispatch, getState) => { diff --git a/frontend/src/components/organisms/LoadingComponent/index.less b/frontend/src/components/organisms/LoadingComponent/index.less index 84e6cfe7..135fdb39 100644 --- a/frontend/src/components/organisms/LoadingComponent/index.less +++ b/frontend/src/components/organisms/LoadingComponent/index.less @@ -14,16 +14,16 @@ } } } -// .main-with-spinner { -// pointer-events: none; -// height: 100%; -// // position: fixed; /* Sit on top of the page content */ -// width: 100%; /* Full width (cover the whole page) */ -// top: 0; -// left: 0; -// right: 0; -// bottom: 0; -// background-color: rgba(0, 0, 0, 0.08); /* background with opacity */ -// z-index: 2; /* Specify a stack order in case you're using a different order for other elements */ -// cursor: pointer; /* Add a pointer on hover */ -// } +.main-with-spinner { + pointer-events: none; + height: 100%; + // position: fixed; /* Sit on top of the page content */ + width: 100%; /* Full width (cover the whole page) */ + top: 0; + left: 0; + right: 0; + bottom: 0; + opacity: 0.4; + z-index: 2; /* Specify a stack order in case you're using a different order for other elements */ + cursor: pointer; /* Add a pointer on hover */ +} diff --git a/frontend/src/components/organisms/Pagination/index.jsx b/frontend/src/components/organisms/Pagination/index.jsx index d5b5691d..65f7bbd6 100644 --- a/frontend/src/components/organisms/Pagination/index.jsx +++ b/frontend/src/components/organisms/Pagination/index.jsx @@ -21,20 +21,23 @@ const RenderPagination = (props) => { const onSetPage = useCallback( (_evt, newPage, _perPage, startIdx, endIdx) => { dispatch(setPage(newPage, props.type)); - dispatch(checkTableData(newPage, props.type)); + // dispatch(checkTableData(newPage, props.type)); }, [dispatch, props.type] ); const onPerPageSelect = useCallback( (_evt, newPerPage, newPage, startIdx, endIdx) => { dispatch(setPageOptions(newPage, newPerPage, props.type)); - dispatch(checkTableData(newPage, props.type)); + // dispatch(checkTableData(newPage, props.type)); }, [dispatch, props.type] ); const onNextClick = useCallback( (_evt, newPage) => { + if (props.type === "cpt") { + dispatch(setPage(newPage, props.type)); + } dispatch(checkTableData(newPage, props.type)); }, [dispatch, props.type] @@ -52,6 +55,7 @@ const RenderPagination = (props) => { onPerPageSelect={onPerPageSelect} onNextClick={onNextClick} onPageInput={onNextClick} + isCompact={props.type === "cpt" ? true : false} /> ); }; diff --git a/frontend/src/components/templates/Home/index.jsx b/frontend/src/components/templates/Home/index.jsx index d02d0ee5..52b281b0 100644 --- a/frontend/src/components/templates/Home/index.jsx +++ b/frontend/src/components/templates/Home/index.jsx @@ -22,7 +22,7 @@ const Home = () => { tableColumns, activeSortDir, activeSortIndex, - results, + tableData, filterOptions, tableFilters, categoryFilterValue, @@ -91,7 +91,7 @@ const Home = () => { /> { return { ...state, activeSortIndex: payload }; case TYPES.SET_CPT_SORT_DIR: return { ...state, activeSortDir: payload }; + case TYPES.SET_CPT_INIT_JOBS: + return { ...state, tableData: payload }; case TYPES.SET_CPT_FILTER_DATA: return { ...state, filterData: payload }; case TYPES.SET_CATEGORY_FILTER:
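
Taken together, a client pages through the CPT jobs API by carrying each response's total and offset into the next request. A rough end-to-end sketch (Python; the host and route are assumed from API_ROUTES.CPT_JOBS_API_V1 and a local dev server, neither is defined in this patch):

    import requests

    base = "http://localhost:8000/api/v1/cpt/jobs"  # assumed URL, adjust to your deployment
    size, offset, total_jobs = 25, 0, 0
    rows = []

    while True:
        params = {"size": size, "offset": offset, "totalJobs": total_jobs}
        payload = requests.get(base, params=params).json()
        rows.extend(payload["results"])
        total_jobs = payload["total"]   # server aggregates per-product totals on the first call
        offset = payload["offset"]      # next offset = previous offset + size
        if not payload["results"] or offset >= total_jobs:
            break

    print(f"fetched {len(rows)} of {total_jobs} jobs")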