@@ -84,7 +84,8 @@ def get_cmr_time(collection_id: str) -> Tuple[str, str]:
8484 url = f"https://cmr.earthdata.nasa.gov/search/collections.umm_json_v1_4?short_name={ short_name } &version={ version } "
8585 else :
8686 url = f"https://cmr.{ cmr_env } .earthdata.nasa.gov/search/collections.umm_json_v1_4?short_name={ short_name } &version={ version } "
87- logger .info (f"Requesting granule time from: { url } " )
87+
88+ logger .debug (f"Requesting granule time from: { url } " )
8889 res = requests .get (url )
8990 data = res .json ()
9091 if not data ["items" ]:
@@ -116,7 +117,7 @@ def init_collection(collection_name, collection_version, conn) -> str:
116117 collection_id = f"{ collection_name } ___{ collection_version } "
117118 try :
118119 start , end = get_cmr_time (collection_id )
119- logger .info (f"Initializing { collection_id } with { start , end } " )
120+ logger .debug (f"Retrieved temporal extent for { collection_id } : { start } to { end } " )
120121
121122 # For new collection, partition `gaps` and `reasons` tables and insert into `collections` table
122123 with conn .cursor () as cur :
@@ -148,6 +149,7 @@ def init_collection(collection_name, collection_version, conn) -> str:
148149 logger .info (
149150 f"Created gaps partition { partition_name } for collection { collection_id } "
150151 )
152+
151153 # Create partition on `reasons` table
152154 reasons_partition_name = f"reasons_{ safe_collection_id } "
153155 cur .execute (
@@ -189,11 +191,13 @@ def init_collection(collection_name, collection_version, conn) -> str:
189191 )
190192
191193 conn .commit ()
194+ logger .info (f"Successfully initialized collection { collection_id } " )
192195 return f"Initialized collection { collection_id } in table"
193196
194197 except Exception as e :
195198 conn .rollback ()
196- logger .warning (traceback .format_exc ())
199+ logger .error (f"Collection { collection_id } initialization failed: { str (e )} " )
200+ logger .debug (traceback .format_exc ())
197201 return f"Collection { collection_id } initialization failed: { str (e )} "
198202
199203
@@ -236,8 +240,10 @@ def init_migration_stream(collection_name, collection_version):
236240 )
237241 payload_response = json .loads (response ["Payload" ].read ().decode ())
238242 if response ["StatusCode" ] != 200 or payload_response .get ("statusCode" ) != 200 :
243+ logger .error (f"Migration stream invocation failed for { collection_name } v{ collection_version } " )
239244 raise Exception (f"Collection backfill failed: { payload_response .get ('body' )} " )
240245
246+ logger .info (f"Migration stream completed for { collection_name } v{ collection_version } " )
241247 return {
242248 "status" : "success" ,
243249 "statusCode" : response ["StatusCode" ],
@@ -260,11 +266,11 @@ def save_tolerance_to_dynamodb(shortname: str, versionid: str, tolerance: int):
260266 "granulegap" : tolerance ,
261267 }
262268 )
263- logger .info (
269+ logger .debug (
264270 f"Saved tolerance for { shortname } ___{ versionid } : { tolerance } seconds. PutItem Response: { response ['ResponseMetadata' ]['HTTPStatusCode' ]} "
265271 )
266272 except Exception as e :
267- logger .error (f"Failed to save tolerance to DynamoDB: { str (e )} " )
273 logger .error (f"Failed to save tolerance to DynamoDB for { shortname } ___{ versionid } : { str (e )} " )
268274 raise
269275
270276
@@ -291,13 +297,13 @@ def lambda_handler(event: events.SQSEvent, context: Context) -> Dict[str, Any]:
291297 try :
292298 http_method = event .get ("httpMethod" , "" )
293299 resource_path = event .get ("path" , "" )
294- logger .info (f"Got HTTP { http_method } for { resource_path } " )
300+ logger .debug (f"Got HTTP { http_method } for { resource_path } " )
295301
296302 try :
297303 collections , backfill_behavior = parse_event (event )
298304 except Exception as e :
299- message = f"Error processing request: { str (e )} "
300- logger .error ( traceback . format_exc () )
305+ message = f"Invalid request format: { str (e )} "
306+ logger .warning ( message )
301307 return build_response (400 , {"message" : message })
302308
303309 if http_method != "POST" :
@@ -308,6 +314,7 @@ def lambda_handler(event: events.SQSEvent, context: Context) -> Dict[str, Any]:
308314 for collection in collections :
309315 collection_id = f"{ collection ['name' ]} ___{ collection ['version' ]} "
310316 tolerance = collection .get ("tolerance" )
317+
311318 # Update tolerance table even if the collection already exists
312319 if tolerance is not None :
313320 try :
@@ -316,45 +323,42 @@ def lambda_handler(event: events.SQSEvent, context: Context) -> Dict[str, Any]:
316323 collection ["raw_version" ],
317324 int (tolerance ),
318325 )
326+ logger .info (f"Updated tolerance for { collection ['name' ]} v{ collection ['raw_version' ]} : { tolerance } s" )
319327 except Exception as e :
320328 logger .error (
321329 f"Error saving tolerance for { collection ['name' ]} ___{ collection ['raw_version' ]} : { str (e )} "
322330 )
331+
323332 # Add collection to collections table, create partition for gaps table, set initial full gap
324333 if collection_id not in current_collections :
325- message = init_collection (
326- collection ["name" ], collection ["version" ], conn
327- )
328- logger .info (message )
334+ init_collection (collection ["name" ], collection ["version" ], conn )
329335
330336 # Kick off the migration stream
331337 try :
332- logger .info (f"Starting collection backfill" )
333338 migration_result = init_migration_stream (
334339 collection ["name" ], collection ["version" ].replace ("_" , "." )
335340 )
336- logger .info (f"Backfill result: { migration_result } " )
341+ logger .debug (f"Backfill result: { migration_result } " )
337342 except Exception as e :
338343 message = (
339344 f"Collection backfill failed for { collection_id } : { str (e )} "
340345 )
341346 logger .error (message )
342- logger .warn (
347+ logger .warning (
343348 f"Collection { collection_id } left in incomplete state, use force=True to rectify"
344349 )
345350 return build_response (500 , {"message" : message })
351+
346352 # Skip DB init but still backfill granules from CMR
347353 elif backfill_behavior .lower () == "force" :
348354 logger .info (
349355 f"Force flag detected, proceeding with backfill for existing collection: { collection_id } "
350356 )
351- # Kick off the migration stream
352357 try :
353- logger .info (f"Starting collection backfill" )
354358 migration_result = init_migration_stream (
355359 collection ["name" ], collection ["version" ].replace ("_" , "." )
356360 )
357- logger .info (f"Backfill result: { migration_result } " )
361+ logger .debug (f"Backfill result: { migration_result } " )
358362 except Exception as e :
359363 message = (
360364 f"Collection backfill failed for { collection_id } : { str (e )} "
@@ -366,11 +370,12 @@ def lambda_handler(event: events.SQSEvent, context: Context) -> Dict[str, Any]:
366370 f"Skipping initialization of { collection_id } : already exists in collection table"
367371 )
368372
373+ logger .info (f"Collection initialization completed for { len (collections )} collection(s)" )
369374 return build_response (
370375 200 , {"message" : f"Collection initialization complete for { collections } " }
371376 )
372377
373378 except Exception as e :
374- logger .error (f"Error processing request : { str (e )} " )
375- logger .error (traceback .format_exc ())
379+ logger .error (f"Unexpected error in lambda handler: { str (e )} " )
380+ logger .debug (traceback .format_exc ())
376381 return build_response (500 , {"message" : "Unexpected error occurred" })
0 commit comments