Skip to content

Commit ef14ee4

Browse files
committed
ruff reformatting
1 parent cbf6ff0 commit ef14ee4

35 files changed

Lines changed: 258 additions & 253 deletions

iglu_python/adrr.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
1-
import warnings
21

32
import numpy as np
43
import pandas as pd
54

65
from .utils import check_data_columns
76

7+
88
def adrr(data: pd.DataFrame|pd.Series) -> pd.DataFrame|float:
99
"""
1010
Calculate average daily risk range (ADRR)
@@ -83,7 +83,7 @@ def adrr_single(data: pd.DataFrame|pd.Series) -> float:
8383
data_filtered = data.dropna()
8484
if len(data_filtered) == 0:
8585
return np.nan
86-
86+
8787
# Group by date and calculate daily risk for each day
8888
daily_risks = data_filtered.groupby(data_filtered.index.date).apply(
8989
lambda x: _calculate_daily_risk(x)

iglu_python/ea1c.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -72,9 +72,9 @@ def ea1c_single(data: pd.Series) -> float:
7272
"""Calculate eA1C for a single subject"""
7373
if not isinstance(data, pd.Series):
7474
raise ValueError("Data must be a pandas Series")
75-
75+
7676
data = data.dropna()
7777
if len(data) == 0:
7878
return np.nan
7979

80-
return (46.7 + data.mean()) / 28.7
80+
return (46.7 + data.mean()) / 28.7

iglu_python/grade_eugly.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99

1010
def grade_eugly(
1111
data: Union[pd.DataFrame, pd.Series, np.ndarray, list], lower: int = 70, upper: int = 140
12-
) -> pd.DataFrame|float:
12+
) -> pd.DataFrame|float:
1313
"""
1414
Calculate percentage of GRADE score attributable to target range.
1515
@@ -63,7 +63,7 @@ def grade_eugly(
6363
if isinstance(data, (np.ndarray, list)):
6464
data = pd.Series(data)
6565
return grade_eugly_single(data, lower, upper)
66-
66+
6767
# Handle DataFrame input
6868
data = check_data_columns(data)
6969

@@ -91,4 +91,4 @@ def grade_eugly_single(data: pd.Series, lower: int = 70, upper: int = 140) -> fl
9191
return np.nan
9292

9393
eugly_percent = (np.sum(grade_scores[in_range]) / total_grade) * 100
94-
return eugly_percent
94+
return eugly_percent

iglu_python/grade_hyper.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -75,12 +75,12 @@ def grade_hyper_single(data: pd.Series, upper: int = 140) -> float:
7575

7676
# Calculate GRADE scores
7777
grade_scores = _grade_formula(data)
78-
78+
7979
# Calculate percentage above upper bound
8080
above_upper = data > upper
8181
total_grade = np.sum(grade_scores)
8282
if total_grade == 0:
8383
return np.nan
8484

8585
hyper_percent = (np.sum(grade_scores[above_upper]) / total_grade) * 100
86-
return hyper_percent
86+
return hyper_percent

iglu_python/grade_hypo.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ def grade_hypo(data: Union[pd.DataFrame, pd.Series, np.ndarray, list], lower: in
5858
if isinstance(data, (np.ndarray, list)):
5959
data = pd.Series(data)
6060
return grade_hypo_single(data, lower)
61-
61+
6262
# Handle DataFrame input
6363
data = check_data_columns(data)
6464

@@ -77,12 +77,12 @@ def grade_hypo_single(data: pd.Series, lower: int = 80) -> float:
7777

7878
# Calculate GRADE scores
7979
grade_scores = _grade_formula(data)
80-
80+
8181
# Calculate percentage below lower bound
8282
below_lower = data < lower
8383
total_grade = np.sum(grade_scores)
8484
if total_grade == 0:
8585
return np.nan
8686

8787
hypo_percent = (np.sum(grade_scores[below_lower]) / total_grade) * 100
88-
return hypo_percent
88+
return hypo_percent

iglu_python/gvp.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ def gvp(data: Union[pd.DataFrame, pd.Series]) -> pd.DataFrame|float:
6363
return gvp_single(data)
6464

6565
# Handle DataFrame input
66-
data = check_data_columns(data)
66+
data = check_data_columns(data)
6767
data.set_index("time", inplace=True, drop=True)
6868

6969
out = data.groupby('id').agg(
@@ -138,4 +138,4 @@ def gvp_single(subj_data):
138138
if base_length == 0:
139139
return np.nan
140140

141-
return (np.sum(added_length) / base_length - 1) * 100
141+
return (np.sum(added_length) / base_length - 1) * 100

iglu_python/hbgi.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -88,4 +88,4 @@ def calculate_hbgi_single(glucose_values: pd.Series) -> float:
8888
n = len(glucose_values)
8989
hbgi_value = 10 * np.sum(fbg[glucose_values >= 112.5] ** 2) / n
9090

91-
return hbgi_value
91+
return hbgi_value

iglu_python/hyper_index.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -90,4 +90,4 @@ def hyper_index_single(
9090
hyper_values = gl[gl > ULTR] - ULTR
9191
hyper_index = np.sum(hyper_values**a) / (len(gl) * c)
9292

93-
return hyper_index
93+
return hyper_index

iglu_python/hypo_index.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ def hypo_index(
6767
if isinstance(data, (np.ndarray, list)):
6868
data = pd.Series(data)
6969
return hypo_index_single(data, LLTR, b, d)
70-
70+
7171
data = check_data_columns(data)
7272
out = data.groupby('id').agg(
7373
hypo_index = ("gl", lambda x: hypo_index_single(x, LLTR, b, d))

iglu_python/igc.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -86,12 +86,12 @@ def igc(
8686
return out
8787

8888
def igc_single(
89-
gl: pd.Series,
90-
LLTR: int = 80,
91-
ULTR: int = 140,
92-
a: float = 1.1,
93-
b: float = 2,
94-
c: int = 30,
89+
gl: pd.Series,
90+
LLTR: int = 80,
91+
ULTR: int = 140,
92+
a: float = 1.1,
93+
b: float = 2,
94+
c: int = 30,
9595
d: int = 30
9696
) -> float:
9797
"""
@@ -102,4 +102,4 @@ def igc_single(
102102
out_hypo = hypo_index(gl, LLTR=LLTR, b=b, d=d)
103103

104104
out = out_hyper + out_hypo
105-
return out
105+
return out

0 commit comments

Comments
 (0)