Skip to content
Snippets Groups Projects
Commit 766e45e6 authored by Ann-Kathrin Margarete Edrich's avatar Ann-Kathrin Margarete Edrich
Browse files

Fix user input check to accept csv for non-landslide database

parent 414b04e4
No related branches found
No related tags found
No related merge requests found
......@@ -10,12 +10,3 @@ Susceptibility and Hazard mappIng fRamEwork SHIRE
example
example-plain
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
......@@ -265,16 +265,17 @@ class create_training_data:
"""
if self.properties_train['nonls_path'].split('.')[-1] == 'csv':
self.absence = pd.read_csv(self.properties_train['nonls_path'])
self.df_absence = pd.read_csv(self.properties_train['nonls_path'])
self.absence = self.absence.rename(
self.df_absence = self.df_absence.rename(
columns={
self.properties_train['x_nonls']: self.properties_train['x'],
self.properties_train['y_nonls']: self.properties_train['y']})
nonls_id = [
'nonls_event_' + str(i) for i in range(len(self.absence))]
self.absence.insert(0, self.properties_train['id'], nonls_id)
'nonls_event_' + str(i) for i in range(len(self.df_absence))]
if self.properties_train['id'] not in self.df_absence.columns.tolist():
self.df_absence.insert(0, self.properties_train['id'], nonls_id)
self.logger.info('Absence locations added')
......
key,type,range,extension,path
ls_path,str,None,csv,1
nonls_path,str,None,nc,1
nonls_path,str,None,"nc,csv",1
train_path,str,None,csv,1
geo_path,str,None,csv,1
feat_path,str,None,csv,1
......
......@@ -199,19 +199,37 @@ class create_training_data:
Supplement presence data with absence data. It needs to be
pre-generated.
"""
if settings.path_nonls_locations.split('.')[-1] == 'csv':
self.df_absence = pd.read_csv(settings.path_nonls_locations)
self.df_absence = self.df_absence.rename(
columns={
settings.nonls_database_x: settings.landslide_database_x,
settings.nonls_database_y: settings.landslide_database_y})
ds = nc.Dataset(settings.path_nonls_locations)
nonls_id = [
'nonls_event_' + str(i) for i in range(len(self.df_absence))]
if settings.ID not in self.df_absence.columns.tolist():
self.df_absence.insert(0, settings.ID, nonls_id)
x = ds[settings.nonls_database_x][:].data
y = ds[settings.nonls_database_y][:].data
self.df_absence = pd.DataFrame(index=range(len(x)),
columns=list(self.df_train.columns))
self.logger.info('Absence locations added')
elif settings.path_nonls_locations.split('.')[-1] == 'nc':
ds = nc.Dataset(settings.path_nonls_locations)
x = ds[settings.nonls_database_x][:].data
y = ds[settings.nonls_database_y][:].data
self.df_absence[settings.ID] = ['nonls_event_' + str(i)
for i in range(len(x))]
self.df_absence[settings.landslide_database_x] = list(x)
self.df_absence[settings.landslide_database_y] = list(y)
self.df_absence = pd.DataFrame(index=range(len(x)),
columns=list(self.df_train.columns))
self.df_absence[settings.ID] = [
'nonls_event_' + str(i) for i in range(len(x))]
self.df_absence[settings.landslide_database_x] = list(x)
self.df_absence[settings.landslide_database_y] = list(y)
self.logger.info('Absence locations added')
def label_training_data(self):
......@@ -576,6 +594,9 @@ class create_training_data:
"""
Determine if the extent of one or several
clusters are too large for local interpolation
Input:
-
Output:
num_bb: list, names of clusters that need reclustering
......
key,type,range,extension,path
ls_path,str,None,csv,1
nonls_path,str,None,nc,1
nonls_path,str,None,"nc,csv",1
train_path,str,None,csv,1
geo_path,str,None,csv,1
feat_path,str,None,csv,1
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment