I have set up a state machine in AWS Step Functions that creates an EMR cluster, adds a few EMR steps, and then terminates the cluster. This works fine as long as all the steps run to completion without errors. If a step fails, however, the execution does not proceed to the next step, despite a Catch being defined. Whenever a step fails, that step is marked as caught (shown in orange in the graph), but the next step is marked as cancelled.
This is my state machine definition, in case it helps:
{
  "StartAt": "MyEMR-SMFlowContainer-beta",
  "States": {
    "MyEMR-SMFlowContainer-beta": {
      "Type": "Parallel",
      "End": true,
      "Branches": [
        {
          "StartAt": "CreateClusterStep-feature-generation-cluster-beta",
          "States": {
            "CreateClusterStep-feature-generation-cluster-beta": {
              "Next": "Step-SuccessfulJobOne",
              "Type": "Task",
              "ResultPath": "$.Cluster.1.CreateClusterTask",
              "Resource": "arn:aws:states:::elasticmapreduce:createCluster.sync",
              "Parameters": {
                "Instances": {
                  "Ec2SubnetIds": [
                    "subnet-*******345fd38423"
                  ],
                  "InstanceCount": 2,
                  "KeepJobFlowAliveWhenNoSteps": true,
                  "MasterInstanceType": "m4.xlarge",
                  "SlaveInstanceType": "m4.xlarge"
                },
                "JobFlowRole": "MyEMR-emrInstance-beta-EMRInstanceRole",
                "Name": "emr-step-fail-handle-test-cluster",
                "ServiceRole": "MyEMR-emr-beta-EMRRole",
                "Applications": [
                  {
                    "Name": "Spark"
                  },
                  {
                    "Name": "Hadoop"
                  }
                ],
                "AutoScalingRole": "MyEMR-beta-FeatureG-CreateClusterStepfeature-NJB2UG1J1EWB",
                "Configurations": [
                  {
                    "Classification": "spark-env",
                    "Configurations": [
                      {
                        "Classification": "export",
                        "Properties": {
                          "PYSPARK_PYTHON": "/usr/bin/python3"
                        }
                      }
                    ]
                  }
                ],
                "LogUri": "s3://MyEMR-beta-feature-createclusterstepfeature-1jpp1wp3dfn04/emr/logs/",
                "ReleaseLabel": "emr-5.32.0",
                "VisibleToAllUsers": true
              }
            },
            "Step-SuccessfulJobOne": {
              "Next": "Step-AlwaysFailingJob",
              "Catch": [
                {
                  "ErrorEquals": [
                    "States.ALL"
                  ],
                  "Next": "Step-AlwaysFailingJob"
                }
              ],
              "Type": "Task",
              "TimeoutSeconds": 7200,
              "ResultPath": "$.ClusterStep.SuccessfulJobOne.AddSparkTask",
              "Resource": "arn:aws:states:::elasticmapreduce:addStep.sync",
              "Parameters": {
                "ClusterId.$": "$.Cluster.1.CreateClusterTask.ClusterId",
                "Step": {
                  "Name": "SuccessfulJobOne",
                  "ActionOnFailure": "CONTINUE",
                  "HadoopJarStep": {
                    "Jar": "command-runner.jar",
                    "Args": [
                      "spark-submit",
                      "--deploy-mode",
                      "client",
                      "--master",
                      "yarn",
                      "--conf",
                      "spark.logConf=true",
                      "--class",
                      "com.test.sample.core.EMRJobRunner",
                      "s3://my-****-bucket/jars/77/my-****-bucketBundleJar-1.0.jar",
                      "--JOB_NUMBER",
                      "1",
                      "--JOB_KEY",
                      "SuccessfulJobOne"
                    ]
                  }
                }
              }
            },
            "Step-AlwaysFailingJob": {
              "Next": "Step-SuccessfulJobTwo",
              "Catch": [
                {
                  "ErrorEquals": [
                    "States.ALL"
                  ],
                  "Next": "Step-SuccessfulJobTwo"
                }
              ],
              "Type": "Task",
              "TimeoutSeconds": 7200,
              "ResultPath": "$.ClusterStep.AlwaysFailingJob.AddSparkTask",
              "Resource": "arn:aws:states:::elasticmapreduce:addStep.sync",
              "Parameters": {
                "ClusterId.$": "$.Cluster.1.CreateClusterTask.ClusterId",
                "Step": {
                  "Name": "AlwaysFailingJob",
                  "ActionOnFailure": "CONTINUE",
                  "HadoopJarStep": {
                    "Jar": "command-runner.jar",
                    "Args": [
                      "spark-submit",
                      "--deploy-mode",
                      "client",
                      "--master",
                      "yarn",
                      "--conf",
                      "spark.logConf=true",
                      "--class",
                      "com.test.sample.core.EMRJobRunner",
                      "s3://my-****-bucket/jars/77/my-****-bucketBundleJar-1.0.jar",
                      "--JOB_NUMBER",
                      "2",
                      "--JOB_KEY",
                      "AlwaysFailingJob"
                    ]
                  }
                }
              }
            },
            "Step-SuccessfulJobTwo": {
              "Next": "TerminateClusterStep-feature-generation-cluster-beta",
              "Catch": [
                {
                  "ErrorEquals": [
                    "States.ALL"
                  ],
                  "Next": "TerminateClusterStep-feature-generation-cluster-beta"
                }
              ],
              "Type": "Task",
              "TimeoutSeconds": 7200,
              "ResultPath": "$.ClusterStep.SuccessfulJobTwo.AddSparkTask",
              "Resource": "arn:aws:states:::elasticmapreduce:addStep.sync",
              "Parameters": {
                "ClusterId.$": "$.Cluster.1.CreateClusterTask.ClusterId",
                "Step": {
                  "Name": "DeviceJob",
                  "ActionOnFailure": "CONTINUE",
                  "HadoopJarStep": {
                    "Jar": "command-runner.jar",
                    "Args": [
                      "spark-submit",
                      "--deploy-mode",
                      "client",
                      "--master",
                      "yarn",
                      "--conf",
                      "spark.logConf=true",
                      "--class",
                      "com.test.sample.core.EMRJobRunner",
                      "s3://my-****-bucket/jars/77/my-****-bucketBundleJar-1.0.jar",
                      "--JOB_NUMBER",
                      "3",
                      "--JOB_KEY",
                      "SuccessfulJobTwo"
                    ]
                  }
                }
              }
            },
            "TerminateClusterStep-feature-generation-cluster-beta": {
              "End": true,
              "Type": "Task",
              "ResultPath": null,
              "Resource": "arn:aws:states:::elasticmapreduce:terminateCluster.sync",
              "Parameters": {
                "ClusterId.$": "$.Cluster.1.CreateClusterTask.ClusterId"
              }
            }
          }
        }
      ]
    }
  },
  "TimeoutSeconds": 43200
}
Can somebody please advise how I can catch a failure in a step, ignore it, and proceed to the next step? Thanks in advance.
The issue was that I was not specifying ResultPath in the Catch properties. Since ResultPath defaults to $, the catch block's error output was overwriting the entire state output. The next step could then no longer read the cluster information, because it had been overwritten, and was therefore cancelled.
"Catch": [
{
"ErrorEquals": [
"States.ALL"
],
"Next": "Step-SuccessfulJobTwo"
}
],
Once I updated the Catch to use a proper ResultPath, it worked as expected.
"Catch": [
{
"ErrorEquals": [
"States.ALL"
],
"Next": "Step-SuccessfulJobTwo",
"ResultPath": "$.ClusterStep.SuccessfulJobOne.AddSparkTask.Error",
}
],
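For reference, when the Catch fires with this ResultPath, Step Functions merges the error output (an object with Error and Cause fields) into the existing state data at that path instead of replacing it. The effective input to the next state then looks roughly like the sketch below, trimmed to the relevant fields; the ClusterId and Cause values are placeholders, and the exact Cause text depends on how the EMR step failed:

{
  "Cluster": {
    "1": {
      "CreateClusterTask": {
        "ClusterId": "j-XXXXXXXXXXXXX"
      }
    }
  },
  "ClusterStep": {
    "SuccessfulJobOne": {
      "AddSparkTask": {
        "Error": {
          "Error": "States.TaskFailed",
          "Cause": "..."
        }
      }
    }
  }
}

Because the cluster details at $.Cluster.1.CreateClusterTask survive the catch, the next state's "ClusterId.$": "$.Cluster.1.CreateClusterTask.ClusterId" lookup still resolves, and that step is no longer cancelled.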