
# Usage: run this script from the directory that it is in:
#   python tissuerepeatabilityT_plots.py <path to data xml>
# This script can handle 1-4 tests of the same sample.

import sys
import numpy as np
import matplotlib.pyplot as plt
# scipy signal-processing helpers (not all of these imports are used below)
from scipy.signal import butter, lfilter, freqz, filtfilt
from scipy.optimize import curve_fit

# File/path handling, data tables, XML parsing, statistics, and serialization
import os
import pandas as pd
import ntpath
import xmltodict
from scipy import stats
import xml.etree.ElementTree as ET
import pickle

# TODO: add modulus covariance
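# The test XML is expected to contain one <Test> element per data file; a sketch of the
# assumed layout (root tag name and values below are placeholders, not real data):
#
#   <root>
#     <Test>
#       <file>sample_test_001.txt</file>
#       <InitialLength>10.0</InitialLength>
#       <InitialLength2>10.5</InitialLength2>
#       <Thickness>2.0</Thickness>
#       <Width>5.0</Width>
#     </Test>
#   </root>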
def parseTestXML(xmlname):
    """Return the list of test data files and a dict of test variables keyed by file name."""
    testData = {}
    doc = ET.parse(xmlname)
    root = doc.getroot()

    file_list = []
    for test in root.findall("Test"):
        fname = test.find("file").text
        file_list.append(fname)
        testData[fname] = {}
        print(fname)
        # Every other child of <Test> is stored as a test variable (e.g. InitialLength, Thickness)
        for child in test:
            if child.tag != 'file':
                testData[fname][child.tag] = child.text
    return file_list, testData
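# For reference, the returned values might look like this (placeholder values only):
#   files    -> ['sample_test_001.txt', 'sample_test_002.txt']
#   testData -> {'sample_test_001.txt': {'InitialLength': '10.0', 'Thickness': '2.0', ...}, ...}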

def butter_lowpass(cutoff, fs, order=3):        # design a low-pass Butterworth filter (removes content above the cutoff frequency)
    nyq = 0.5 * fs
    normal_cutoff = cutoff / nyq
    b, a = butter(order, normal_cutoff, btype='low', analog=False)
    return b, a


def butter_lowpass_filter(data, cutoff, fs, order):
    b, a = butter_lowpass(cutoff, fs, order=order)
    # y = lfilter(b, a, data)  # single-pass alternative (introduces phase lag)
    y = filtfilt(b, a, data, method="gust")
    return y
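# filtfilt runs the filter forward and then backward, so the smoothed signal has no phase lag
# relative to the raw data; method="gust" (Gustafsson's method) handles the signal edges.
# A quick sanity check on synthetic data might look like this (illustrative only):
#
#   t = np.linspace(0, 1, 2500)
#   noisy = np.sin(2 * np.pi * 2 * t) + 0.1 * np.random.randn(t.size)
#   smooth = butter_lowpass_filter(noisy, cutoff=20, fs=2500, order=3)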

xmlname = sys.argv[-1]  # path to the test XML, passed on the command line

files, testVariables = parseTestXML(xmlname)

stresses = []
strains = []

filecount = 0
fs = 2500        # sampling frequency of the recorded signals, Hz
threshold = 200
# Low-pass filter parameters
order = 3
cutoff = 20      # desired cutoff frequency of the filter, Hz
MachData = {}

for f_itr, file in enumerate(files):

    input_filename = os.path.join(os.path.dirname(xmlname), file) # Test data from Mach-1 (.txt file)
    infile = open(input_filename, 'r')

    # Extract test variables (sample geometry) for the current test file
    init_length = float(testVariables[file]['InitialLength'])
    init_length2 = float(testVariables[file]['InitialLength2'])
    thickness = float(testVariables[file]['Thickness'])
    width = float(testVariables[file]['Width'])

    area = thickness * width  # cross-sectional area (thickness x width)

    # --------------------
    # Pick out the data blocks that follow Move and Sinusoid commands
    rampcount = 0
    ramplist = []       # header line of each ramp/command

    step = False
    MachData[file] = {}
    prevStepEndTime = 0
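    # The Mach-1 .txt export appears to be organized as repeated blocks of the form
    # (layout inferred from the parsing below):
    #   <Move Relative> / <Move Absolute> / <Sinusoid>     command header
    #   Velocity ...                                       command parameters
    #   <DATA>
    #   Time, s    Position (z), mm    Fz, gf    ...       tab-delimited samples
    #   <END DATA>
    # Each such block is stored as one numbered "ramp" under MachData[file].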

    for n, line in enumerate(infile):  # read each line to build the ramp list
        # Command header lines are matched starting at character 1 (the leading character, likely '<', is skipped)
        if line[1:5] == 'Move' or line[1:4] == "Sin":
            step = True
            MachData[file][str(rampcount)] = {}
            MachData[file][str(rampcount)]['Name'] = line  # e.g. "Move Relative", "Move Absolute", "Sinusoid"
            ramplist.append(line)
        elif line[0:8] == 'Velocity' and step:
            MachData[file][str(rampcount)]['velocity'] = line[16:22]  # velocity value parsed from fixed columns
        elif line[1:5] == 'DATA' and step:
            skip_row = n
        elif line[1:9] == 'END DATA' and step:
            nrow = n
            # Read the tab-delimited block between the DATA and END DATA markers into a DataFrame
            data = pd.read_csv(input_filename, skiprows=skip_row+1, nrows=nrow-skip_row-2, delimiter='\t')
            MachData[file][str(rampcount)]['Data'] = data  # note: this references the same DataFrame as `data`
            data['Filtered Load'] = butter_lowpass_filter(abs(data['Fz, gf']), cutoff, fs, order)
            data['Filtered Position'] = abs(data['Position (z), mm'])
            data['Filtered Displacement1'] = data['Filtered Position'] - init_length   # displacement relative to the first initial length
            data['Filtered Displacement2'] = data['Filtered Position'] - init_length2  # displacement relative to the second initial length

            data['Total Time'] = data['Time, s'] + prevStepEndTime  # time measured from the start of the whole test
            prevStepEndTime += data['Time, s'].iloc[-1]
            rampcount += 1
            step = False

    infile.close()


    # Ramps '2' and '10' are hard-coded: based on the displacements used below, they are the ramps
    # referenced to the first and second initial lengths, respectively (assumed fixed by the test protocol).
    MachData[file]['2']['Data']['Strain1'] = MachData[file]['2']['Data']['Filtered Displacement1'] / init_length    # strain = change in length / initial length
    MachData[file]['2']['Data']['Stress1'] = MachData[file]['2']['Data']['Filtered Load'] * .0098 / area            # gf -> N (1 gf ~ 0.0098 N), then divide by area
    MachData[file]['10']['Data']['Strain2'] = MachData[file]['10']['Data']['Filtered Displacement2'] / init_length2
    MachData[file]['10']['Data']['Stress2'] = MachData[file]['10']['Data']['Filtered Load'] * .0098 / area

# Store the MachData dictionary next to the XML as a pickle file
with open(xmlname[0:-3] + 'pickle', 'wb') as handle:
    pickle.dump(MachData, handle, protocol=pickle.HIGHEST_PROTOCOL)
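
# Downstream plotting/analysis scripts can reload the dictionary later, e.g. (illustrative only):
#
#   with open(xmlname[0:-3] + 'pickle', 'rb') as handle:
#       MachData = pickle.load(handle)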