You cannot select more than 25 topics.
Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
76 lines
2.0 KiB
76 lines
2.0 KiB
# Standard library
import gzip
import os.path

# Third party
import numpy as np
import pandas as pd
import yaml
from scipy.signal import savgol_filter
def load_data_RH_logger(filepath, every=1):
    """Load a space-separated RH-logger data file into a DataFrame.

    The file starts with a header line of the form ``# col1|col2|...``;
    data lines are space-separated and end with a trailing space, which
    pandas reads as one extra, empty column. Plain-text and
    gzip-compressed files are both supported (detected from a ``gz``
    filename suffix).

    Parameters
    ----------
    filepath : str
        Path to the logger file (optionally gzip-compressed).
    every : int, optional
        Keep only every ``every``-th row of the result (default 1,
        i.e. keep all rows).

    Returns
    -------
    pandas.DataFrame
        One column per header field; the first data row is dropped.
    """
    is_gz = filepath.endswith('gz')

    # Pick the right opener once instead of duplicating the read logic
    # across gz/plain branches.
    opener = gzip.open if is_gz else open
    with opener(filepath, 'rt') as f:
        header = f.readline()

    # Reorganize header: drop the leading "# ", split on "|", and add a
    # placeholder name for the empty column created by the trailing space.
    header = header.strip('# ').rstrip('\n').split('|')
    header.append('X')  # Empty col...

    # Load with pandas. 'infer' reproduces the original else-branch
    # default; 'gzip' matches the original explicit gz branch.
    compression = 'gzip' if is_gz else 'infer'
    df = pd.read_csv(filepath, sep=' ', compression=compression,
                     names=header, skiprows=1)

    # Clean up: drop the placeholder column and the first data row
    # (presumably a spurious startup sample — TODO confirm).
    df = df.drop(columns='X')
    df = df.drop(index=0)

    # Renumber rows from 0 after the drop.
    df = df.reset_index(drop=True)

    return df.iloc[::every]
def process_data_RH_logger(filepath, every=1):
    """Process an RH-logger file and save the result as HDF5.

    Loads the raw data at full resolution, adds mass-variation and
    time-derivative columns, decimates the result, and writes it next
    to the input file as ``<stem>-processed.h5``.

    Parameters
    ----------
    filepath : str
        Path to the raw logger file (see ``load_data_RH_logger``).
    every : int, optional
        Keep only every ``every``-th row of the processed result
        (default 1). Loading and derivative computation always use
        the full-resolution data.

    Returns
    -------
    pandas.DataFrame
        The decimated, processed data that was written to disk.
    """
    # Always load at full resolution so the smoothing/derivatives below
    # see every sample; decimation happens only at the very end.
    df = load_data_RH_logger(filepath, every=1)

    # Mass variation: absolute (dm) and relative (dm_m) with respect to
    # the first sample.
    m0 = df['weight'].iloc[0]
    df['dm'] = df['weight'] - m0
    df['dm_m'] = (df['weight'] - m0) / m0

    # Relative variation normalized by its final plateau value
    # (mean of the last 300 samples).
    mf = df['dm_m'].tail(300).mean()
    df['m_mf'] = df['dm_m'] / mf

    # Time derivatives: Savitzky-Golay smoothed derivative and a coarse
    # finite difference over 1000 samples.
    # NOTE(review): window_length=10000 is even; some scipy versions
    # require an odd window for savgol_filter — confirm against the
    # installed scipy.
    delta = np.mean(df['time'].diff())
    df['dMdt_SG'] = savgol_filter(df['m_mf'], window_length=10000,
                                  polyorder=1, deriv=1, delta=delta)
    df['dMdt_diff'] = df['m_mf'].diff(periods=1000) / df['time'].diff(periods=1000)

    df['dweightdt_SG'] = savgol_filter(df['weight'], window_length=10000,
                                       polyorder=1, deriv=1, delta=delta)
    df['dweightdt_diff'] = df['weight'].diff(periods=1000) / df['time'].diff(periods=1000)

    # Save next to the input file as "<stem>-processed.h5".
    h5path = os.path.splitext(filepath)[0] + '-processed.h5'
    df = df.iloc[::every]
    df.to_hdf(h5path, key='data')

    # Return the processed frame so callers don't have to re-read the HDF5.
    return df
def load_metadata(filepath):
    """Read a YAML metadata file and return its parsed content.

    Parameters
    ----------
    filepath : str
        Path to the YAML metadata file.

    Returns
    -------
    object
        Whatever ``yaml.safe_load`` produces (typically a dict).
    """
    with open(filepath, 'r') as fh:
        return yaml.safe_load(fh)