import os
import json

import numpy as np
import lmfit

# Read in the fitting parameter definitions.
# data_directory and fitting_parameters_file are assumed to be defined earlier in the
# script; the lines defining them are truncated in the original.
with open(fitting_parameters_file, "r") as f:
    fitting_parameters = json.load(f)

# Read in the initial guess parameters
initial_guess_file = os.path.join(data_directory, "initial_guess.json")
with open(initial_guess_file, "r") as f:
    initial_guess = json.load(f)
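# The loop further down expects fitting_parameters[key] to be a dict with 'vary',
# 'min' and 'max' entries, and initial_guess[key] to be a bare number, so the two
# JSON files presumably look something like this (assumed layout; the parameter
# names are illustrative only and do not appear in the original):
#
#   fitting_parameters.json:  {"amplitude": {"vary": true, "min": 0.0, "max": 10.0}}
#   initial_guess.json:       {"amplitude": 1.0}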

# Read in the data files
datafiles = []
for filename in os.listdir(data_directory):
    if filename.endswith(".csv"):
        datafiles.append(os.path.join(data_directory, filename))

# Read in the data from each file and store it as a list of dictionaries with keys 'x' and 'y'
data = [] # List of dictionaries with keys 'x' and 'y' for each dataset to be fitted

for file in datafiles:

    print("Loading", file)

    # Each CSV is assumed to hold two columns (x, y); transposing lets them unpack directly
    x, y = np.genfromtxt(file, delimiter=",").T

    dic = {'x': x, 'y': y}  # Dictionary entry for this dataset
    data.append(dic)        # Add it to the list of datasets

    print("Loaded", len(x), "points")

print("Loaded", len(data), "datasets\n")

# Fit the model to all of the datasets simultaneously using lmfit's Parameters class and minimize function
fitparams = lmfit.Parameters() # Create an empty Parameters object to store the fit parameters

for key in fitting_parameters:  # Add every parameter that needs to be fitted to fitparams

    param = fitting_parameters[key]  # Dict describing this parameter: vary flag and bounds

    # Starting value comes from initial_guess; vary/min/max come from fitting_parameters
    fitparams.add(key, value=initial_guess[key], vary=param['vary'],
                  min=param['min'], max=param['max'])
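# The comment above mentions lmfit's minimize function, but the actual fit call is
# missing from the snippet. The sketch below shows one way it could look, assuming a
# model(params, x) function exists that evaluates the model for a given x array; that
# function and the minimize call itself are hypothetical, not part of the original code.

def residual(params, data):
    # Stack the residuals of every dataset so all of them are minimised simultaneously
    res = [d['y'] - model(params, d['x']) for d in data]
    return np.concatenate(res)

result = lmfit.minimize(residual, fitparams, args=(data,))
print(lmfit.fit_report(result))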
