-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathutils.py
More file actions
274 lines (226 loc) · 13 KB
/
utils.py
File metadata and controls
274 lines (226 loc) · 13 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
import csv
import difflib
import os
from io import StringIO
import yaml
import pandas as pd
def detect_separator(filepath):
    """Guess the delimiter of a CSV-like file from its first line.

    Args:
        filepath: Path to a text file; read as UTF-8, decode errors ignored.

    Returns:
        ',' or ';' — whichever appears first in that order on the first
        line — or None when neither is present (now an explicit return
        instead of the original implicit fall-through).
    """
    with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
        first_line = f.readline()
    for delimiter in (',', ';'):
        if delimiter in first_line:
            return delimiter
    return None
def rename_columns(directory):
    """Rename the columns of every CSV in *directory* to canonical labels.

    CSVs whose name starts with 'documentation' receive the long REDCap
    screening-log question labels; all other CSVs receive the short
    participant/consent column names. Each result is written alongside the
    original as 'redcap_<original name>'.

    Fix: the original printed and saved a literal '(unknown)' placeholder,
    so every file was written to the same 'redcap_(unknown)' path,
    overwriting earlier outputs; the computed filename is now used.
    """
    print("Renaming columns...")
    for file in os.listdir(directory):
        if file.endswith(".csv"):
            current_path = os.path.join(directory, file)
            df = pd.read_csv(current_path, sep=";")
            # REDCap data for drops record ID 31
            if file.startswith("documentation"):
                df = df.rename(columns={
                    df.columns[0]: "participant_identifier",
                    df.columns[1]: "event_name",
                    df.columns[2]: "Has the first contact already been established?",
                    df.columns[3]: "On which unit is the candidate?",
                    df.columns[4]: "What are the reasons why the participant has not yet been approached for the study? (choice=Feeling of the practitioner/study staff that patient is currently too severely ill)",
                    df.columns[5]: "What are the reasons why the participant has not yet been approached for the study? (choice=assumed language barrier)",
                    df.columns[6]: "What are the reasons why the participant has not yet been approached for the study? (choice=An attempt was made to establish contact, but the patient could not be found on the unit. In this case: leave a flyer for the patient with ward staff)",
                    df.columns[7]: "What are the reasons why the participant has not yet been approached for the study? (choice=Patient has already refused study participation previously)",
                    df.columns[8]: "What are the reasons why the participant has not yet been approached for the study? (choice=other)",
                    df.columns[9]: "Optional: Here is place to enter another possible reason why no contact could be established yet:",
                    df.columns[10]: "After the first contact: What were the reasons that the candidate did not participate in the educational interview for the study? (choice=too severe illness at the moment)",
                    df.columns[11]: "After the first contact: What were the reasons that the candidate did not participate in the educational interview for the study? (choice=not able to speak local language properly)",
                    df.columns[12]: "After the first contact: What were the reasons that the candidate did not participate in the educational interview for the study? (choice=treating clinician does not want to participate in IMMERSE)",
                    df.columns[13]: "After the first contact: What were the reasons that the candidate did not participate in the educational interview for the study? (choice=declined to participate without indicating reasons)",
                    df.columns[14]: "After the first contact: What were the reasons that the candidate did not participate in the educational interview for the study? (choice=other:)",
                    df.columns[15]: "Optional: Here is space to enter another possible reason for declining participation in the study:",
                    df.columns[16]: "Did an educational interview to inform about the study take place?",
                    df.columns[17]: "If an educational interview has not yet taken place: What were the reasons why the educational interview was not carried out? (choice=Participant has not appeared) ",
                    df.columns[18]: "If an educational interview has not yet taken place: What were the reasons why the educational interview was not carried out? (choice=Participant has changed her mind & canceled)",
                    df.columns[19]: "If an educational interview has not yet taken place: What were the reasons why the educational interview was not carried out? (choice=Informed consent sheet was not available during educational interview)",
                    df.columns[20]: "If an educational interview has not yet taken place: What were the reasons why the educational interview was not carried out? (choice=Other)",
                    df.columns[21]: "Optional: Here is space to specify further reasons:",
                })
            else:
                df = df.rename(columns={
                    df.columns[0]: "participant_identifier",
                    df.columns[1]: "event_name",
                    df.columns[2]: "consent",
                    df.columns[3]: "condition",
                    df.columns[4]: "t1_dropout",
                    df.columns[5]: "t2_dropout",
                    df.columns[6]: "t3_dropout"
                })
            filename = os.path.basename(current_path)
            print(f"Renaming {filename}... ")
            new_filepath = os.path.join(directory, f"redcap_{filename}")
            df.to_csv(new_filepath, sep=";", index=False)
def filter_and_rename_values_in_df(directory):
    """Filter 'redcap*' CSVs to known REDCap events and shorten their labels.

    Rows whose 'event_name' is not one of the known raw REDCap event labels
    are dropped; the remaining labels are mapped to their short form. Each
    result is written as 'filtered_<original name>'.

    Fixes: duplicate 'In contact (Arm 2: In contact)' entries in the
    original list/dict literals removed (same key, same value); the filtered
    frame is copied before mutation to avoid pandas' SettingWithCopyWarning;
    the '(unknown)' placeholder in the message/output path is replaced with
    the actual filename (previously every file was saved to the same
    'filtered_(unknown)' path).
    """
    print("Filtering and renaming values...")
    # Raw REDCap event label -> short event name. Keys double as the
    # row filter below.
    event_map = {
        "In contact (Arm 2: In contact)": "In contact",
        "First contact (Arm 2: In contact)": "In contact",
        "Baseline (Arm 1: Included)": "Baseline",
        "T1 (Arm 1: Included)": "T1",
        "T2 (Arm 1: Included)": "T2",
        "T3 (Arm 1: Included)": "T3"
    }
    for filename in os.listdir(directory):
        if filename.startswith("redcap") and filename.endswith(".csv"):
            df = pd.read_csv(os.path.join(directory, filename), sep=";")
            # .copy() so the assignment below writes to an owned frame.
            df_filtered = df[df["event_name"].isin(event_map)].copy()
            df_filtered["event_name"] = df_filtered["event_name"].replace(event_map)
            print(f"Filtering and renaming values from {filename}... ")
            new_filepath = os.path.join(directory, f"filtered_{filename}")
            df_filtered.to_csv(new_filepath, sep=";", index=False)
def detect_separator(filepath):
    """Return ',' or ';' — whichever occurs first on the file's first line.

    Returns None when neither delimiter is present.
    NOTE: this duplicates the detect_separator defined earlier in this
    module; the later definition is the one that takes effect at import.
    """
    with open(filepath, 'r', encoding='utf-8', errors='ignore') as handle:
        header = handle.readline()
    return next((sep for sep in (',', ';') if sep in header), None)
def rename_files(filepath, word_to_replace, replacement):
    """Prefix matching CSV files in *filepath* with '<replacement>_'.

    Non-'ITEM*' CSV files whose name contains *word_to_replace* are renamed
    to '<replacement>_<original name>'; non-matching CSVs are reported.

    Fix: the original built the new name via file.replace(file, ...), a
    no-op full-string replace — now a plain f-string with identical result.
    NOTE(review): *word_to_replace* is only used as a filter and is never
    substituted inside the name — confirm that is the intent.
    """
    for file in os.listdir(filepath):
        if not file.startswith("ITEM") and file.endswith(".csv"):
            if word_to_replace in file:
                new_name = f"{replacement}_{file}"
                print(new_name)
                os.rename(os.path.join(filepath, file), os.path.join(filepath, new_name))
            else:
                print(f"{word_to_replace} not found in {file}")
def load_config_file(directory, file):
    """Read config.yaml (located next to this module) and return a section.

    Args:
        directory: Top-level key in the YAML document.
        file: Optional sub-key; when falsy the whole *directory* section
            is returned instead of a single entry.

    Returns:
        config[directory][file] when *file* is truthy, else config[directory].
    """
    module_dir = os.path.dirname(os.path.abspath(__file__))
    config_path = os.path.join(module_dir, "config.yaml")
    with open(config_path, "r", encoding="utf-8") as handle:
        config = yaml.safe_load(handle)
    section = config[directory]
    return section[file] if file else section
def write_config_file(filepath, file, key="data_requirements"):
    """Record the full path of *file* under *key* in config.yaml.

    Looks inside the existing config's *key* section for the sub-key whose
    value is a directory (or equals *filepath*) and overwrites that entry
    with the joined full path; the config is then written back.

    Args:
        filepath: Directory the file lives in (also the search target).
        file: File name to record.
        key: Top-level config section to write into.

    Returns:
        The full path that was recorded.

    Fixes: when config.yaml did not exist, `key_file` was never assigned
    (NameError), and `config[key]` raised KeyError on a fresh config;
    `key_file` is now initialised and the section created on demand, falling
    back to *file* as the sub-key when no match is found.
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    config_path = os.path.join(base_dir, "config.yaml")

    def find_file_key(data, search_key, target_directory):
        # Depth-first search: inside the `search_key` mapping, return the
        # first sub-key whose string value is an existing directory or
        # equals target_directory.
        if isinstance(data, dict):
            for k, v in data.items():
                # Only search inside the desired key
                if k == search_key and isinstance(v, dict):
                    for sub_key, sub_value in v.items():
                        if isinstance(sub_value, str) and (os.path.isdir(sub_value) or sub_value == target_directory):
                            return sub_key
                elif isinstance(v, dict):
                    result = find_file_key(v, search_key, target_directory)
                    if result:
                        return result
        return None

    key_file = None
    if os.path.exists(config_path):
        with open(config_path, "r", encoding="utf-8") as f:
            config = yaml.safe_load(f) or {}
        key_file = find_file_key(config, key, filepath)
    else:
        config = {}
    full_file_path = os.path.join(filepath, file)
    # Fall back to the bare file name when no matching sub-key exists
    # (previously this path crashed before writing anything).
    config.setdefault(key, {})[key_file if key_file is not None else file] = full_file_path
    with open(config_path, "w", encoding="utf-8") as f:
        yaml.safe_dump(config, f, default_flow_style=False)
    return full_file_path
def merge_files(source_path, new_filename):
    """Concatenate all 'Logins*' CSV/XLSX files in *source_path* into one sheet.

    CSVs are read with ';' separator and latin1 encoding; XLSX files with
    pandas' default engine. Duplicated rows are dropped and the merged frame
    is written to *new_filename* (Excel) inside *source_path*.

    Returns:
        The merged, de-duplicated DataFrame.

    Raises:
        ValueError: when no matching file is found (previously surfaced as
            pd.concat's opaque "No objects to concatenate").
    """
    frames = []
    for name in os.listdir(source_path):
        # endswith accepts a tuple — one call instead of an `or` chain.
        if name.startswith('Logins') and name.endswith(('.xlsx', '.csv')):
            filepath = os.path.join(source_path, name)
            if filepath.endswith('.csv'):
                # NOTE(review): latin1 assumed for the CSV exports — confirm.
                frames.append(pd.read_csv(filepath, sep=';', encoding='latin1'))
            else:
                frames.append(pd.read_excel(filepath))
    if not frames:
        raise ValueError(f"No 'Logins*' .csv/.xlsx files found in {source_path}")
    merged_dataframes = pd.concat(frames, ignore_index=True).drop_duplicates()
    merged_dataframes.to_excel(os.path.join(source_path, new_filename), index=False)
    print("Merging done.", source_path)
    return merged_dataframes
def get_unique_values_from_columns(df, column1, column2, directory, new_filename):
    """Write the unique (column1, column2) pairs of *df* to a ';'-separated file.

    Both columns are cast to str and whitespace-stripped before
    de-duplication. NOTE: this intentionally mutates those two columns on
    the caller's DataFrame, matching the original behaviour.

    Returns:
        The two-column, de-duplicated DataFrame with a fresh index.
    """
    for col in (column1, column2):
        df[col] = df[col].astype(str).str.strip()
    unique_pairs = df[[column1, column2]].drop_duplicates().reset_index(drop=True)
    destination = os.path.join(directory, new_filename)
    unique_pairs.to_csv(destination, index=False, sep=";")
    return unique_pairs
def merge_name_surname_id_clinicians(df1, df2, path):
    """Attach clinician identifiers from one Excel sheet to another by name.

    Args:
        df1: Path to an Excel file with FirstName/LastName columns and a
            clinician_identifier column (the reference mapping).
        df2: Path to an Excel file with firstName/lastName columns to be
            enriched with identifiers.
        path: Directory where the merged workbook is written.

    Returns:
        A copy of the df2 frame with a 'clinician_identifier' column added;
        also written to 'merged_logins_2025-2025_extracted_names.xlsx'.
    """
    df1 = pd.read_excel(df1)
    df2 = pd.read_excel(df2)
    # Harmonise df1's column names with df2's camelCase convention.
    df1 = df1.rename(columns={"FirstName": "firstName", "LastName": "lastName"})
    df1.info()
    df2.info()
    # Rows with a first name can be matched on (name, surname); rows
    # without one fall back to surname-only matching below.
    df1["complete_key"] = df1["firstName"].notna()
    df1["match_name"] = df1["firstName"].where(df1["complete_key"], None)
    df1["match_surname"] = df1["lastName"]
    df2["match_name"] = df2["firstName"]
    df2["match_surname"] = df2["lastName"]
    # Right join keeps every df2 row, matched where possible on both keys.
    full_match = df1[df1["complete_key"]].merge(df2,
        on=["match_name", "match_surname"],
        how="right",
        suffixes=("_df1", "_df2")
    )
    # Surname-only fallback for df1 rows lacking a first name.
    only_surname_match = df1[~df1["complete_key"]].merge(
        df2,
        on=["match_surname"],
        how="inner",
        suffixes=("_df1", "_df2")
    )
    merged = df2.copy()
    # NOTE(review): combine_first aligns the two merge results on their
    # positional index, not on participant identity — if row order differs
    # between full_match/only_surname_match and df2, the wrong identifier
    # can be attached. Verify against real data before trusting the output.
    merged["clinician_identifier"] = full_match["clinician_identifier"].combine_first(only_surname_match["clinician_identifier"])
    merged.to_excel(os.path.join(path, "merged_logins_2025-2025_extracted_names.xlsx"), index=False)
    return merged
def prepare_login_files(login_directory):
    """Build clinician-ID-mapped login exports from raw DMMH login files.

    Merges all 'Logins*' files, dumps unique clinician names for manual ID
    mapping, then joins the manual ID reference back onto the merged logins
    and writes the ID-only login export plus a clinician/userId map.
    """
    # Logins: Function used to map DMMH clinicians with login files.
    id_reference_clinicians = "2025-12-02_dmmh_id_map_clinicians_(logins).xlsx"
    all_logins_merged = merge_files(source_path=login_directory, new_filename="merged_logins_2023-2025.xlsx")
    # Dump unique clinician names for the manual ID-mapping step.
    # NOTE(review): get_unique_values_from_columns writes CSV content, so
    # the .xlsx extension below is misleading — confirm intended format.
    get_unique_values_from_columns(
        df=all_logins_merged,
        column1="firstName",
        column2="lastName",
        directory=login_directory,
        new_filename='unique_therapy_design_names.xlsx'
    )
    # NOTE(review): the file merged above is named 2023-2025 but the one
    # read back here is 2022-2025 — one of the two names is likely wrong;
    # as written this ignores the freshly merged frame.
    all_logins_merged = pd.read_excel(os.path.join(login_directory, "merged_logins_2022-2025.xlsx"))
    unique_clinicians_with_ids = pd.read_excel(os.path.join(login_directory, id_reference_clinicians))
    # Keep only logins with a known clinician_identifier; drop the raw
    # name columns from the output.
    all_logins_merged_with_identified_ids = (all_logins_merged.merge(
        unique_clinicians_with_ids,
        on=["firstName", "lastName"], how="inner")
        .dropna(subset="clinician_identifier")
        .drop(columns=["firstName", "lastName"]))
    all_logins_merged_with_identified_ids.to_excel(
        os.path.join(login_directory, "merged_logins_2022-2025_clinician_ids_v2.xlsx"), index=False)
    # Final clinician_identifier <-> userId lookup table.
    get_unique_values_from_columns(
        df=all_logins_merged_with_identified_ids,
        column1="clinician_identifier",
        column2="userId",
        directory=login_directory,
        new_filename='2025-12-12_dmmh_id_map_clinicians_v2.csv'
    )
def merge_files_maganamed(directory1, directory2):
    """Stack same-named CSVs from two directories and write the result.

    For every CSV present in both *directory1* and *directory2*, the two
    frames are concatenated (directory1 rows first) and written under the
    shared name into the parent directory of *directory2*.

    Fix: the original compared every directory2 file against every loaded
    target in a nested loop (O(n*m)); a dict lookup does the same in O(1)
    per file.
    """
    # Function used to merge files from "Main" with the rest of the locations for RecordID22
    target_files = {}
    for file in os.listdir(directory1):
        if file.endswith(".csv"):
            target_files[file] = pd.read_csv(os.path.join(directory1, file), sep=";")
    print(target_files.keys())
    for original_file in os.listdir(directory2):
        target_df = target_files.get(original_file)
        if target_df is not None:
            new_df = pd.read_csv(os.path.join(directory2, original_file), sep=";")
            combined = pd.concat([target_df, new_df])
            combined.to_csv(os.path.join(os.path.dirname(directory2), original_file), sep=";", index=False)