@@ -3,8 +3,6 @@
 
 Attributes
 ----------
-cols : list of str
-    List of all feature labels + StepLabel
 row_count : int
     No. of rows of dataset_operations to be used
 RF_ESTIMATORS : int

@@ -34,9 +32,9 @@
 
 DATA_PATH : str
     loading in the actual dataset_operations for one sensor (Data under test)
-NEW_DATASET : str
+PROCESSED_DATASET : str
     Directory name for new data set which contains the training/testing data for the classifier
-NEW_DATASET_PATH : str
+PROCESSED_DATASET_PATH : str
     Directory path for new data set which contains the training/testing data for the classifier
 TRAINED_MODEL_DIR : str
     Trained Model directory name

@@ -60,15 +58,13 @@
 from sklearn.preprocessing import MinMaxScaler
 from sklearn.ensemble import RandomForestClassifier
 from sklearn.model_selection import train_test_split
-from config import new_sensor_paths, ROOT, Path
+from config import new_sensor_paths, ROOT, Path, DATASETS
 
 # Configuring locale for datetime purposes
-lang = 'de_DE'
-locale.setlocale(locale.LC_ALL, lang)
+lang = 'de_DE.UTF-8'
+locale.setlocale(locale.LC_TIME, lang)
 
 # Model Configuration Variables
-# list of all feature labels + StepLabel
-cols = pd.read_csv(f'{new_sensor_paths[0]}/{os.listdir(new_sensor_paths[0])[0]}', sep='\t', index_col=0).columns
 # Setting numpy print precision
 np.set_printoptions(precision=5)
 # no. of rows of dataset_operations to be used
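
Note on the locale change: switching from LC_ALL to LC_TIME narrows the override to date/time formatting only, so numeric and string handling elsewhere stays on the default locale, and the explicit UTF-8 suffix avoids failures on systems that only ship the UTF-8 variant of the German locale. A minimal sketch of the effect, assuming the timestamps being parsed use German month names (the format string and sample value below are illustrative, not taken from the repository):

import locale
from datetime import datetime

# Override only time formatting; 'de_DE.UTF-8' must be installed on the host.
locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')

# %B now matches German month names such as "März".
ts = datetime.strptime("03. März 2020 14:30", "%d. %B %Y %H:%M")
print(ts.isoformat())  # 2020-03-03T14:30:00
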
@@ -99,14 +95,14 @@
 EXPORT_MODEL = False
 
 # Paths
-# loading in the actual dataset_operations for the ML classifier
-DATA_PATH = Path(f"{ROOT}/Features_Dataset/ds_all.csv")
 # Directory name for new data set which contains the training/testing data for the classifier
-NEW_DATASET = "Features_Dataset"
+PROCESSED_DATASET = "Processed_Dataset"
 # Directory path for new data set which contains the training/testing data for the classifier
-NEW_DATASET_PATH = Path(f'{ROOT}/{NEW_DATASET}')
+PROCESSED_DATASET_PATH = Path(f'{DATASETS}/{PROCESSED_DATASET}')
+# loading in the actual dataset for the ML classifier
+DATA_PATH = Path(f"{PROCESSED_DATASET_PATH}/ds_all.csv")
 # Trained Model directory name
-TRAINED_MODEL_DIR = 'Trained_Models'
+TRAINED_MODEL_DIR = 'Trained Models'
 # Trained Model directory path
 TRAINED_MODEL_PATH = Path(f'{ROOT}/{TRAINED_MODEL_DIR}')
 # Trained Model name
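
Net effect of the path changes: the dataset CSV now lives under the processed-dataset directory rooted at DATASETS rather than directly under ROOT. A rough sketch of how the constants compose, with placeholder values standing in for ROOT and DATASETS (both are actually defined in config.py and imported above):

from pathlib import Path

# Placeholder values for illustration only; the real ones come from config.py.
ROOT = Path("/home/user/project")
DATASETS = ROOT / "Datasets"  # assumed layout, not confirmed by the diff

PROCESSED_DATASET = "Processed_Dataset"
PROCESSED_DATASET_PATH = Path(f'{DATASETS}/{PROCESSED_DATASET}')
DATA_PATH = Path(f"{PROCESSED_DATASET_PATH}/ds_all.csv")

print(DATA_PATH)  # /home/user/project/Datasets/Processed_Dataset/ds_all.csv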