Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

🔧 Update kibana dashboards config to accommodate more rows in … #189

Merged
merged 4 commits into from
Jul 9, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 13 additions & 13 deletions docker/kibana/exports/dashboards.ndjson

Large diffs are not rendered by default.

6 changes: 6 additions & 0 deletions docker/kibana/exports/required_fields_index_template.json
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,12 @@
}
}
},
"isStreamingJob": {
"type": "boolean"
},
"isScheduledJob": {
"type": "boolean"
},
"errorExpr": {
"type": "text",
"fields": {
Expand Down
5 changes: 5 additions & 0 deletions tofhir-engine/src/main/resources/logback.xml
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,11 @@
<appender-ref ref="FLUENT" />
</logger>

<!-- Route RunningJobRegistry log events (job scheduled / descheduled / stopped audit records)
     to the audit appenders as well — NOTE(review): appender semantics assumed from their
     ASYNC-AUDIT names; the appender definitions are outside this fragment. -->
<logger name="io.tofhir.engine.execution.RunningJobRegistry">
    <appender-ref ref="ASYNC-AUDIT" />
    <appender-ref ref="ASYNC-AUDIT-FLUENT" />
</logger>

<!-- Give me DEBUG level logs from io.tofhir package because the default is set to ERROR at root (above) -->
<logger name ="io.tofhir" level="DEBUG" />

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,15 @@ package io.tofhir.engine.execution
import it.sauronsoftware.cron4j.{Scheduler, SchedulerListener, TaskExecutor}
import com.typesafe.scalalogging.Logger
import io.tofhir.engine.Execution.actorSystem.dispatcher
import io.tofhir.engine.model.FhirMappingJobExecution
import io.tofhir.engine.model.{FhirMappingJobExecution, FhirMappingJobResult}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.StreamingQuery

import java.util.UUID
import java.util.concurrent.Executors
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

import io.tofhir.engine.Execution.actorSystem
/**
* Execution manager that keeps track of running and scheduled mapping tasks in-memory.
* This registry is designed to maintain the execution status of both Streaming and Batch mapping jobs.
Expand All @@ -28,15 +28,35 @@ class RunningJobRegistry(spark: SparkSession) {
private val runningTasks: collection.mutable.Map[String, collection.mutable.Map[String, FhirMappingJobExecution]] =
collection.mutable.Map[String, collection.mutable.Map[String, FhirMappingJobExecution]]()

// Keeps the scheduled jobs in the form of: jobId -> (executionId -> Scheduler)
private val scheduledTasks: collection.mutable.Map[String, collection.mutable.Map[String, Scheduler]] =
collection.mutable.Map[String, collection.mutable.Map[String, Scheduler]]()
// Keeps the scheduled jobs in the form of: jobId -> (executionId -> (Scheduler, execution))
private val scheduledTasks: collection.mutable.Map[String, collection.mutable.Map[String, (Scheduler,FhirMappingJobExecution)]] =
collection.mutable.Map[String, collection.mutable.Map[String, (Scheduler,FhirMappingJobExecution)]]()

// Dedicated execution context for blocking streaming jobs
private val streamingTaskExecutionContext: ExecutionContext = ExecutionContext.fromExecutor(Executors.newCachedThreadPool)

private val logger: Logger = Logger(this.getClass)

/**
 * Shutdown hook: once the actor system has terminated (i.e. the application is going down),
 * write a final audit entry for every execution still tracked by this registry —
 * running executions are recorded as 'STOPPED' and scheduled executions as 'DESCHEDULED' —
 * so the audit log reflects that they did not finish normally.
 * NOTE(review): this reads the mutable registry maps from the dispatcher thread without
 * synchronization — presumably safe because no new work is accepted during termination; verify.
 */
actorSystem.whenTerminated
  .map { _ =>
    // Every execution still registered as running -> final status STOPPED
    runningTasks.values.foreach { executionsById =>
      executionsById.values.foreach { execution =>
        val result = FhirMappingJobResult(execution, None, status = Some(FhirMappingJobResult.STOPPED))
        logger.info(result.toMapMarker, result.toString)
      }
    }
    // Every execution still registered as scheduled -> final status DESCHEDULED
    // (the registry stores (Scheduler, execution) tuples; only the execution is needed here)
    scheduledTasks.values.foreach { executionsById =>
      executionsById.values.foreach { case (_, execution) =>
        val result = FhirMappingJobResult(execution, None, status = Some(FhirMappingJobResult.DESCHEDULED))
        logger.info(result.toMapMarker, result.toString)
      }
    }
  }

/**
* Caches a [[FhirMappingJobExecution]] for an individual mapping task
*
Expand Down Expand Up @@ -132,9 +152,11 @@ class RunningJobRegistry(spark: SparkSession) {
def registerSchedulingJob(mappingJobExecution: FhirMappingJobExecution, scheduler: Scheduler): Unit = {
// add it to the scheduledTasks map
scheduledTasks
.getOrElseUpdate(mappingJobExecution.jobId, collection.mutable.Map[String, Scheduler]())
.put(mappingJobExecution.id, scheduler)
logger.debug(s"Scheduling job ${mappingJobExecution.jobId} has been registered")
.getOrElseUpdate(mappingJobExecution.jobId, collection.mutable.Map[String, (Scheduler,FhirMappingJobExecution)]())
.put(mappingJobExecution.id, (scheduler, mappingJobExecution))
// log the mapping job status as 'SCHEDULED'
val jobResult = FhirMappingJobResult(mappingJobExecution, None, status = Some(FhirMappingJobResult.SCHEDULED))
logger.info(jobResult.toMapMarker, jobResult.toString)
// add a scheduler listener to monitor task events
scheduler.addSchedulerListener(new SchedulerListener {
override def taskLaunching(executor: TaskExecutor): Unit = {
Expand Down Expand Up @@ -164,8 +186,11 @@ class RunningJobRegistry(spark: SparkSession) {
// stop the job execution
stopJobExecution(jobId, executionId)
// stop the scheduler for the specified job execution
scheduledTasks(jobId)(executionId).stop()
scheduledTasks(jobId)(executionId)._1.stop()
logger.debug(s"Descheduled the mapping job with id: $jobId and execution: $executionId")
// log the mapping job status as 'DESCHEDULED'
val jobResult = FhirMappingJobResult(scheduledTasks(jobId)(executionId)._2, None, status = Some(FhirMappingJobResult.DESCHEDULED))
logger.info(jobResult.toMapMarker, jobResult.toString)
// remove the execution from the scheduledTask Map
scheduledTasks(jobId).remove(executionId)
// if there are no executions left for the job, remove the job from the map
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@ class FhirMappingJobManager(
fhirWriter.validate()
mappingJobExecution.mappingTasks.foldLeft(Future((): Unit)) { (f, task) => // Initial empty Future
f.flatMap { _ => // Execute the Futures in the Sequence consecutively (not in parallel)
// log the start of the FHIR mapping task execution
val jobResult = FhirMappingJobResult(mappingJobExecution, Some(task.mappingRef))
logger.info(jobResult.toMapMarker, jobResult.toString)

Expand Down Expand Up @@ -114,6 +115,7 @@ class FhirMappingJobManager(
mappingJobExecution.mappingTasks
.map(t => {
logger.debug(s"Streaming mapping job ${mappingJobExecution.jobId}, mapping url ${t.mappingRef} is started and waiting for the data...")
// log the start of the FHIR mapping task execution
val jobResult = FhirMappingJobResult(mappingJobExecution, Some(t.mappingRef))
logger.info(jobResult.toMapMarker, jobResult.toString)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import java.util.regex.Pattern
* @param fileSystemSourceDataFolderPath If execution has a file system source, this is data folder path of it
* @param archiveMode Archive mode of execution
* @param saveErroneousRecords Whether to save erroneous records or not
* @param isScheduledJob Whether the execution is scheduled or not
*/
case class FhirMappingJobExecution(id: String,
projectId: String,
Expand All @@ -29,7 +30,8 @@ case class FhirMappingJobExecution(id: String,
isStreamingJob: Boolean,
fileSystemSourceDataFolderPath: Option[String],
archiveMode: ArchiveModes,
saveErroneousRecords: Boolean
saveErroneousRecords: Boolean,
isScheduledJob: Boolean
) {
/**
* Returns the map of streaming queries i.e. map of (mapping url -> streaming query)
Expand Down Expand Up @@ -175,9 +177,11 @@ object FhirMappingJobExecution {
archiveMode = job.dataProcessingSettings.archiveMode
saveErroneousRecords = job.dataProcessingSettings.saveErroneousRecords
}
// check whether it is a scheduled job or not
val isScheduledJob = job.schedulingSettings.nonEmpty

// Create a FhirMappingJobExecution with only necessary properties
FhirMappingJobExecution(id, projectId, job.id, mappingTasks, jobGroupIdOrStreamingQuery, isStreamingJob, fileSystemSourceDataFolderPath, archiveMode, saveErroneousRecords)
FhirMappingJobExecution(id, projectId, job.id, mappingTasks, jobGroupIdOrStreamingQuery, isStreamingJob, fileSystemSourceDataFolderPath, archiveMode, saveErroneousRecords, isScheduledJob)
}
}

Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,8 @@ case class FhirMappingJobResult(mappingJobExecution: FhirMappingJobExecution,
markerMap.put("numOfFhirResources", numOfFhirResources)
markerMap.put("numOfFailedWrites", numOfFailedWrites)
markerMap.put("eventId", eventId)
markerMap.put("isStreamingJob", mappingJobExecution.isStreamingJob)
markerMap.put("isScheduledJob", mappingJobExecution.isScheduledJob)
// create a new MapMarker using the marker map
new MapMarker("marker", markerMap)
}
Expand Down Expand Up @@ -115,4 +117,16 @@ object FhirMappingJobResult {
* It allows distinguishing between tasks that were intentionally halted and those that failed.
*/
val STOPPED: String = "STOPPED"

/**
* Represents the status when the mapping job has been scheduled.
* This status indicates that the mapping job is planned to be executed at a later time.
*/
val SCHEDULED: String = "SCHEDULED"

/**
* Represents the status when the mapping job has been descheduled.
* This status indicates that the previously scheduled mapping job has been canceled and will not be executed.
*/
val DESCHEDULED: String = "DESCHEDULED"
}
5 changes: 5 additions & 0 deletions tofhir-server/src/main/resources/logback.xml
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,11 @@
<appender-ref ref="ASYNC-AUDIT-FLUENT" />
</logger>

<!-- Route RunningJobRegistry log events (job scheduled / descheduled / stopped audit records)
     to the audit appenders as well — NOTE(review): appender semantics assumed from their
     ASYNC-AUDIT names; the appender definitions are outside this fragment. -->
<logger name="io.tofhir.engine.execution.RunningJobRegistry">
    <appender-ref ref="ASYNC-AUDIT" />
    <appender-ref ref="ASYNC-AUDIT-FLUENT" />
</logger>

<!-- Give me DEBUG level logs from io.tofhir package because the default is set to ERROR at root (above) -->
<logger name ="io.tofhir" level="DEBUG" />

Expand Down
Loading