@@ -168,8 +168,15 @@ def episode_calculation(
168168 subject_episode_data ['id' ] = subject_id
169169
170170 # Append to main dataframes
171- episode_data_df = pd .concat ([episode_data_df , subject_episode_data ], ignore_index = True )
172- episode_summary_df = pd .concat ([episode_summary_df , subject_summary ], ignore_index = True )
171+ if episode_data_df .empty :
172+ episode_data_df = subject_episode_data
173+ else :
174+ episode_data_df = pd .concat ([episode_data_df , subject_episode_data ], ignore_index = True )
175+
176+ if episode_summary_df .empty :
177+ episode_summary_df = subject_summary
178+ else :
179+ episode_summary_df = pd .concat ([episode_summary_df , subject_summary ], ignore_index = True )
173180
174181
175182
@@ -238,7 +245,7 @@ def episode_single(
238245 day_one = day_one .tz_convert (local_tz )
239246 ndays = len (gd2d_tuple [1 ])
240247 # generate grid times starting one step after day one, at fixed dt0-minute intervals
241- time_ip = pd .date_range (start = day_one + pd .Timedelta (minutes = dt0 ), periods = ndays * 24 * 60 / dt0 , freq = f"{ dt0 } min" )
248+ time_ip = pd .date_range (start = day_one + pd .Timedelta (minutes = dt0 ), periods = int ( ndays * 24 * 60 / dt0 ) , freq = f"{ dt0 } min" )
242249 data_ip = gd2d_tuple [0 ].flatten ().tolist ()
243250 new_data = pd .DataFrame ({
244251 "time" : time_ip ,
@@ -305,22 +312,17 @@ def episode_single(
305312 )
306313
307314
308- # Add exclusive labels
309- def hypo_exclusion_logic (group_df ):
310- # group_df is a DataFrame with all columns for the current group
311- if (group_df ['lv2_hypo' ] > 0 ).any ():
312- return pd .Series ([0 ] * len (group_df ), index = group_df .index )
313- else :
314- return group_df ['lv1_hypo' ]
315- ep_per_seg ['lv1_hypo_excl' ] = ep_per_seg .groupby (['segment' , 'lv1_hypo' ]).apply (hypo_exclusion_logic , include_groups = True ).reset_index (level = [0 ,1 ], drop = True ).values .flatten ()
316-
317- def hyper_exclusion_logic (group_df ):
318- # group_df is a DataFrame with all columns for the current group
319- if (group_df ['lv2_hyper' ] > 0 ).any ():
320- return pd .Series ([0 ] * len (group_df ), index = group_df .index )
321- else :
322- return group_df ['lv1_hyper' ]
323- ep_per_seg ['lv1_hyper_excl' ] = ep_per_seg .groupby (['segment' , 'lv1_hyper' ]).apply (hyper_exclusion_logic , include_groups = True ).reset_index (level = [0 ,1 ], drop = True ).values .flatten ()
315+ # Add exclusive labels, replicating the original groupby/apply logic while avoiding the pandas DeprecationWarning raised by groupby.apply with include_groups
316+ # Hypo exclusion rule: within each (segment, lv1_hypo) group, zero out the lv1 label whenever any lv2_hypo value in that group is > 0
317+ def calculate_exclusion (df , lv1_col , lv2_col ):
318+ """Calculate exclusion labels for lv1 episodes based on lv2 episodes in same group"""
319+ df = df .copy ()
320+ df ['group_id' ] = df .groupby (['segment' , lv1_col ]).ngroup ()
321+ group_has_lv2 = df .groupby ('group_id' )[lv2_col ].transform (lambda x : (x > 0 ).any ())
322+ return df [lv1_col ].where (~ group_has_lv2 , 0 )
323+
324+ ep_per_seg ['lv1_hypo_excl' ] = calculate_exclusion (ep_per_seg , 'lv1_hypo' , 'lv2_hypo' )
325+ ep_per_seg ['lv1_hyper_excl' ] = calculate_exclusion (ep_per_seg , 'lv1_hyper' , 'lv2_hyper' )
324326
325327 full_segment_df = pd .concat ([segment_data , ep_per_seg .drop (["segment" ], axis = 1 )], axis = 1 )
326328
0 commit comments