diff --git a/orcasong/file_to_hits.py b/orcasong/file_to_hits.py
index 4cd53c40fb01ccc6c7d1cc283ecb9151e2c47ac9..47a6f5c5b3abf16b30417992340b247fd4f76dff 100644
--- a/orcasong/file_to_hits.py
+++ b/orcasong/file_to_hits.py
@@ -135,17 +135,15 @@ def get_tracks(event_blob, file_particle_type, event_hits, prod_ident):
     """
 
     # parse EventInfo and Header information
     event_id = event_blob['EventInfo'].event_id[0]
-    run_id = event_blob['Header'].start_run.run_id.astype('float32')
-
-    # if 'Header' in event_blob: # if Header exists in file, take run_id from it.
-    #     run_id = event_blob['Header'].start_run.run_id.astype('float32')
-    # else:
-    #     if file_particle_type == 'muon':
-    #         run_id = event_blob['RawHeader'][1][0].astype('float32')
-    #     elif file_particle_type == 'undefined': # currently used with random_noise files
-    #         run_id = event_blob['EventInfo'].run_id
-    #     else:
-    #         run_id = event_blob['RawHeader'][0][0].astype('float32')
+
+    if 'Header' in event_blob: # if Header exists in file, take run_id from it.
+        run_id = event_blob['Header'].start_run.run_id.astype('float32')
+    else:
+        if file_particle_type == 'undefined': # currently used with random_noise files
+            run_id = event_blob['EventInfo'].run_id
+        else:
+            raise InputError('The run_id could not be read from the EventInfo or the Header, '
+                             'please check the source code in get_tracks().')
 
     # collect all event_track information, dependent on file_particle_type
@@ -191,15 +189,6 @@ def get_tracks(event_blob, file_particle_type, event_hits, prod_ident):
         hits_time, triggered = event_hits[:, 3], event_hits[:, 4]
         time_residual_vertex = get_time_residual_nu_interaction_mean_triggered_hits(time_interaction, hits_time, triggered)
 
-        if event_id == 12627:
-            print(time_interaction)
-            hits_time_triggered = hits_time[triggered == 1]
-            print(hits_time_triggered)
-            t_mean_triggered = np.mean(hits_time_triggered, dtype=np.float64)
-            print(t_mean_triggered)
-            time_residual_vertex = t_mean_triggered - time_interaction
-            print(time_residual_vertex)
-
     track = {'event_id': event_id, 'particle_type': particle_type, 'energy': energy, 'is_cc': is_cc,
              'bjorkeny': bjorkeny, 'dir_x': dir_x, 'dir_y': dir_y, 'dir_z': dir_z,
              'time_interaction': time_interaction, 'run_id': run_id, 'vertex_pos_x': vertex_pos_x,
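
Review note on the run_id fallback above: `InputError` is not a Python built-in, so unless OrcaSong defines or imports such an exception elsewhere, reaching that branch would itself fail with a `NameError` rather than the intended message. Below is a minimal, hypothetical sketch of the same lookup (the helper name `get_run_id` is invented here), using the built-in `ValueError` instead:

```python
# Hypothetical standalone sketch, not OrcaSong's exact code. Assumes event_blob
# behaves like a dict of km3pipe tables, as in get_tracks() above.
def get_run_id(event_blob, file_particle_type):
    """Return the run_id from the Header if present, else fall back to EventInfo."""
    if 'Header' in event_blob:  # if a Header exists in the file, take run_id from it
        return event_blob['Header'].start_run.run_id.astype('float32')
    if file_particle_type == 'undefined':  # currently used with random_noise files
        return event_blob['EventInfo'].run_id
    # ValueError used here because InputError is not a Python built-in
    raise ValueError('The run_id could not be read from the EventInfo or the Header.')
```
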
diff --git a/orcasong/tests/generator_speed_test_w_compression/evaluate_generator_IO_speed.py b/orcasong/tests/generator_speed_test_w_compression/evaluate_generator_IO_speed.py
deleted file mode 100644
index 4cfbfef0b43e1d0a3484eca23aea777087a30c52..0000000000000000000000000000000000000000
--- a/orcasong/tests/generator_speed_test_w_compression/evaluate_generator_IO_speed.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""Placeholder"""
-
-import numpy as np
-import h5py
-import timeit
-import cProfile
-
-def generate_batches_from_hdf5_file():
-
-    #filepath = './JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_456_yz_NO_COMPRESSION_NOT_CHUNKED.h5' #2D, no compression
-    #filepath = './JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_456_yz_LZF_CHUNKED.h5' #2D, LZF
-
-    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_456_yzt_NO_COMPRESSION_NOT_CHUNKED.h5' #3D, no compression
-    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_456_yzt_NO_COMPRESSION_CHUNKED.h5' #3D, no compression, chunked
-    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_456_yzt_LARGE_NO_COMPRESSION_CHUNKED.h5' #3D, no compression, chunked, LARGE
-    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_456_yzt_LZF_CHUNKED.h5' #3D, LZF
-    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_456_yzt_LZF_LARGE.h5' #3D, LZF, LARGE, Shuffle True
-    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_456_yzt_LZF_SHUFFLETRUE.h5' #3D, LZF, Shuffle=True
-    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_456_yzt_GZIP_COMPROPT_1.h5' #3D, GZIP
-    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_456_yzt_GZIP_COMPROPT_1_CHUNKSIZE_16.h5' #3D, GZIP, chunksize=16
-
-    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_9_yzt_no_compression.h5' #3D, no compression
-    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_9_yzt_gzip_1.h5' # 3D, gzip, compression_opts=1
-
-
-    # 4d
-    filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_9_xyzt_no_compression_chunked.h5' # 4D, (11x13x18x50)), no compression. chunksize=32 --> 1011 ms
-    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_9_xyzt_lzf.h5' # 4D, (11x13x18x50), lzf --> 2194 ms
-    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_9_xyzt_gzip_1.h5' # 4D, (11x13x18x50), gzip, compression_opts=1 --> 1655 ms
-
-
-    #print 'Testing generator on file ' + filepath
-    #batchsize = 16
-    batchsize = 32
-
-    #dimensions = (batchsize, 13, 18, 1) # 2D
-    #dimensions = (batchsize, 13, 18, 50, 1) # 3D
-    dimensions = (batchsize, 11, 13, 18, 50) # 4D
-
-    f = h5py.File(filepath, "r")
-    filesize = len(f['y'])
-    print(filesize)
-
-    n_entries = 0
-    while n_entries < (filesize - batchsize):
-        xs = f['x'][n_entries : n_entries + batchsize]
-        xs = np.reshape(xs, dimensions).astype(np.float32)
-
-        y_values = f['y'][n_entries:n_entries+batchsize]
-        y_values = np.reshape(y_values, (batchsize, y_values.shape[1]))
-        ys = np.zeros((batchsize, 2), dtype=np.float32)
-
-        for c, y_val in enumerate(y_values):
-            ys[c] = y_val[0:2] # just for testing
-
-        n_entries += batchsize
-        #print n_entries
-        yield (xs, ys)
-    f.close()
-
-
-number = 20
-#t = timeit.timeit(generate_batches_from_hdf5_file, number = number)
-#t = timeit.Timer(stmt="list(generate_batches_from_hdf5_file())", setup="from __main__ import generate_batches_from_hdf5_file")
-#print t.timeit(number) / number
-#print str(number) + 'loops, on average ' + str(t.timeit(number) / number *1000) + 'ms'
-
-pr = cProfile.Profile()
-pr.enable()
-
-t = timeit.Timer(stmt="list(generate_batches_from_hdf5_file())", setup="from __main__ import generate_batches_from_hdf5_file")
-print(str(number) + 'loops, on average ' + str(t.timeit(number) / number *1000) + 'ms')
-
-pr.disable()
-
-pr.print_stats(sort='time')
-
-# TODO check with blosc
\ No newline at end of file
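
Review note: the deleted prototype above reshaped the label array into plain float columns and copied out the first two values per event; the rewritten version of this script, re-added under utilities/ further down in this diff, instead selects the named columns of the structured y array directly. A tiny, self-contained illustration with synthetic data (the field names follow the new script, the values are made up):

```python
import numpy as np

# Synthetic stand-in for the structured 'y' dataset of an orcasong .h5 file.
y = np.array([(1, 101, 5.2), (1, 102, 7.9)],
             dtype=[('run_id', '<f4'), ('event_id', '<f4'), ('energy', '<f4')])
ys = y[['run_id', 'event_id']]   # keeps only the two named columns, no reshape needed
print(ys['event_id'])            # -> [101. 102.]
```
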
diff --git a/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_mupage_xyz-c.toml b/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_mupage_xyz-c.toml
index c6dc7b5accf742281c095c7e5ecd9ed5fe438ebd..afe026df86dfd4f61af38b4745a825f9d13e7082 100644
--- a/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_mupage_xyz-c.toml
+++ b/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_mupage_xyz-c.toml
@@ -36,7 +36,7 @@
 --timecut_mode = 'trigger_cluster'
 --timecut_timespan = 'tight-0'
 --prod_ident = 3 # for neutrinos: 1: 3-100 GeV prod, 2: 1-5 GeV prod ; mupage: 3 ; random_noise: 4
---o = '/home/woody/capn/mppi033h/orcasong_output'
+--o = '/home/woody/capn/mppi033h'
 --chunksize = 32
 --complib = 'zlib'
---complevel = '1'
\ No newline at end of file
+--complevel = 1
\ No newline at end of file
diff --git a/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_mupage_xyz-t.toml b/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_mupage_xyz-t.toml
index 1d6b6fbffe178bd8054597c9dbc32d1f430f67ff..cc7a7761c0a0d4754c9c6420d5306d222bd708c6 100644
--- a/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_mupage_xyz-t.toml
+++ b/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_mupage_xyz-t.toml
@@ -36,7 +36,7 @@
 --timecut_mode = 'trigger_cluster'
 --timecut_timespan = 'tight-0'
 --prod_ident = 3 # for neutrinos: 1: 3-100 GeV prod, 2: 1-5 GeV prod ; mupage: 3 ; random_noise: 4
---o = '/home/woody/capn/mppi033h/orcasong_output'
+--o = '/home/woody/capn/mppi033h'
 --chunksize = 32
 --complib = 'zlib'
---complevel = '1'
\ No newline at end of file
+--complevel = 1
\ No newline at end of file
diff --git a/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_random_noise_xyz-c.toml b/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_random_noise_xyz-c.toml
index 2f070c2699e446712b4fb59de2c290cd1efffd92..79d51c18c6401c5c39b7adb750647203ffca6570 100644
--- a/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_random_noise_xyz-c.toml
+++ b/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_random_noise_xyz-c.toml
@@ -1,8 +1,11 @@
-# A config file for OrcaSong with multiple configurations.
-# Outcomment the config that you want to use!
+# A config file for OrcaSong with a list of all configuration options.
 # More info about the .toml format at https://github.com/toml-lang/toml
 
 ### All available options with some dummy values
+# --o = '/home/woody/capn/mppi033h/orcasong_output'
+# --chunksize = 32
+# --complib = 'zlib'
+# --complevel = '1'
 # --n_bins = '11,13,18,60'
 # --det_geo = 'Orca_115l_23m_h_9m_v'
 # --do2d = false
@@ -32,4 +35,8 @@
 --do4d_mode = 'channel_id'
 --timecut_mode = 'trigger_cluster'
 --timecut_timespan = 'tight-0'
---prod_ident = 4 # for neutrinos: 1: 3-100 GeV prod, 2: 1-5 GeV prod ; mupage: 3 ; random_noise: 4
\ No newline at end of file
+--prod_ident = 4 # for neutrinos: 1: 3-100 GeV prod, 2: 1-5 GeV prod ; mupage: 3 ; random_noise: 4
+--o = '/home/woody/capn/mppi033h'
+--chunksize = 32
+--complib = 'zlib'
+--complevel = 1
\ No newline at end of file
diff --git a/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_random_noise_xyz-t.toml b/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_random_noise_xyz-t.toml
index 9a0940d46cfaaec585e9b3b4c633e7c47153b3a5..812a39baeb7db72d35a96be2e48ab49b94a9f508 100644
--- a/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_random_noise_xyz-t.toml
+++ b/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_random_noise_xyz-t.toml
@@ -1,8 +1,11 @@
-# A config file for OrcaSong with multiple configurations.
-# Outcomment the config that you want to use!
+# A config file for OrcaSong with a list of all configuration options.
 # More info about the .toml format at https://github.com/toml-lang/toml
 
 ### All available options with some dummy values
+# --o = '/home/woody/capn/mppi033h/orcasong_output'
+# --chunksize = 32
+# --complib = 'zlib'
+# --complevel = '1'
 # --n_bins = '11,13,18,60'
 # --det_geo = 'Orca_115l_23m_h_9m_v'
 # --do2d = false
@@ -32,4 +35,8 @@
 --do4d_mode = 'time'
 --timecut_mode = 'trigger_cluster'
 --timecut_timespan = 'tight-0'
---prod_ident = 4 # for neutrinos: 1: 3-100 GeV prod, 2: 1-5 GeV prod ; mupage: 3 ; random_noise: 4
\ No newline at end of file
+--prod_ident = 4 # for neutrinos: 1: 3-100 GeV prod, 2: 1-5 GeV prod ; mupage: 3 ; random_noise: 4
+--o = '/home/woody/capn/mppi033h'
+--chunksize = 32
+--complib = 'zlib'
+--complevel = 1
\ No newline at end of file
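
Review note on the config changes above: `--complevel` goes from the string `'1'` to the integer `1`, presumably so the value reaches the HDF5 writer as a numeric compression level. For illustration only (file name and array are made up, and this is not OrcaSong's actual writer code), this is how such settings are typically passed on with PyTables, whose `Filters` expects an integer `complevel`:

```python
import numpy as np
import tables

# complevel must be an integer in 0-9; a string like '1' would be rejected.
filters = tables.Filters(complib='zlib', complevel=1)
with tables.open_file('example_images.h5', 'w', filters=filters) as f:
    f.create_carray('/', 'x', obj=np.zeros((4, 11, 13, 18, 31), dtype=np.float32))
```
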
diff --git a/orcasong/detx_files/orca_115strings_av23min20mhorizontal_18OMs_alt9mvertical_v1.detx b/user/detx_files/orca_115strings_av23min20mhorizontal_18OMs_alt9mvertical_v1.detx
similarity index 100%
rename from orcasong/detx_files/orca_115strings_av23min20mhorizontal_18OMs_alt9mvertical_v1.detx
rename to user/detx_files/orca_115strings_av23min20mhorizontal_18OMs_alt9mvertical_v1.detx
diff --git a/user/job_submission_scripts/submit_data_to_images.sh b/user/job_submission_scripts/submit_data_to_images.sh
index b246b7deb5e06217f6d8d7a11bfbe450f3a7fe00..a4ea8636ce0dd97aeb308f9a27af4bae76ce8983 100644
--- a/user/job_submission_scripts/submit_data_to_images.sh
+++ b/user/job_submission_scripts/submit_data_to_images.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 #
-#PBS -l nodes=1:ppn=4:sl32g,walltime=06:00:00
-#PBS -o /home/woody/capn/mppi033h/logs/submit_data_to_images_${PBS_JOBID}_${PBS_ARRAYID}.out -e /home/woody/capn/mppi033h/logs/submit_data_to_images_${PBS_JOBID}_${PBS_ARRAYID}.err
+#PBS -l nodes=1:ppn=4:sl32g,walltime=15:00:00
+#PBS -o /home/woody/capn/mppi033h/logs/orcasong_submit_data_to_images_${PBS_JOBID}_${PBS_ARRAYID}.out -e /home/woody/capn/mppi033h/logs/orcasong/submit_data_to_images_${PBS_JOBID}_${PBS_ARRAYID}.err
 # first non-empty non-comment line ends PBS options
 
 # Submit with 'qsub -t 1-10 submit_data_to_images.sh'
@@ -20,18 +20,18 @@
 
 # load env, only working for conda env as of now
 python_env_folder=/home/hpc/capn/mppi033h/.virtualenv/python_3_env/
-code_folder=/home/woody/capn/mppi033h/Code/OrcaSong
+job_logs_folder=/home/woody/capn/mppi033h/logs/orcasong/cout
 
-detx_filepath=${code_folder}/orcasong/detx_files/orca_115strings_av23min20mhorizontal_18OMs_alt9mvertical_v1.detx
-config_file=${code_folder}/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_random_noise_xyz-c.toml
+detx_filepath=/home/woody/capn/mppi033h/Code/OrcaSong/user/detx_files/orca_115strings_av23min20mhorizontal_18OMs_alt9mvertical_v1.detx
+config_file=/home/woody/capn/mppi033h/Code/OrcaSong/user/config/orca_115l_mupage_rn_neutr_classifier/conf_ORCA_115l_mupage_xyz-c.toml
 
-particle_type=random_noise
-mc_prod=random_noise
+particle_type=mupage
+mc_prod=mupage
 
 # total number of files per job, e.g. 10 jobs for 600: 600/10 = 60.
 # For neutrinos and random_noise n=60 with PBS -l nodes=1:ppn=4:sl32g,walltime=3:00:00
 # For mupage: n=1000 with PBS -l nodes=1:ppn=4:sl32g,walltime=15:00:00
-files_per_job=50
+files_per_job=1000
 
 #--- USER INPUT ---#
 
@@ -39,7 +39,6 @@ files_per_job=50
 n=${PBS_ARRAYID}
 
 source activate ${python_env_folder}
-cd ${code_folder}
 
 declare -A filename_arr
 declare -A folder_ip_files_arr
@@ -83,10 +82,10 @@ do
     thread3=$((${file_no_loop_start} + 2))
     thread4=$((${file_no_loop_start} + 3))
 
-    (time taskset -c 0 python ${code_folder}/orcasong/data_to_images.py -c ${config_file} ${folder}/${filename}.${thread1}.h5 ${detx_filepath} > ${code_folder}/user/job_logs/cout/${filename}.${thread1}.txt) &
-    (time taskset -c 1 python ${code_folder}/orcasong/data_to_images.py -c ${config_file} ${folder}/${filename}.${thread2}.h5 ${detx_filepath} > ${code_folder}/user/job_logs/cout/${filename}.${thread2}.txt) &
-    (time taskset -c 2 python ${code_folder}/orcasong/data_to_images.py -c ${config_file} ${folder}/${filename}.${thread3}.h5 ${detx_filepath} > ${code_folder}/user/job_logs/cout/${filename}.${thread3}.txt) &
-    (time taskset -c 3 python ${code_folder}/orcasong/data_to_images.py -c ${config_file} ${folder}/${filename}.${thread4}.h5 ${detx_filepath} > ${code_folder}/user/job_logs/cout/${filename}.${thread4}.txt) &
+    (time taskset -c 0 make_nn_images -c ${config_file} ${folder}/${filename}.${thread1}.h5 ${detx_filepath} > ${job_logs_folder}/${filename}.${thread1}.txt) &
+    (time taskset -c 1 make_nn_images -c ${config_file} ${folder}/${filename}.${thread2}.h5 ${detx_filepath} > ${job_logs_folder}/${filename}.${thread2}.txt) &
+    (time taskset -c 2 make_nn_images -c ${config_file} ${folder}/${filename}.${thread3}.h5 ${detx_filepath} > ${job_logs_folder}/${filename}.${thread3}.txt) &
+    (time taskset -c 3 make_nn_images -c ${config_file} ${folder}/${filename}.${thread4}.h5 ${detx_filepath} > ${job_logs_folder}/${filename}.${thread4}.txt) &
 
     wait
 done
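
Review note: the job script above now calls the installed `make_nn_images` entry point instead of the checked-out `data_to_images.py`, pinning four parallel runs to separate cores with `taskset` and waiting for the batch. Purely as an illustration and not part of OrcaSong, the same four-way fan-out could be driven from Python; `run_four` and its arguments are hypothetical:

```python
import subprocess

def run_four(files, config_file, detx_filepath, log_dir):
    """Run make_nn_images on up to four input files in parallel, one per core."""
    procs = []
    for core, fname in enumerate(files[:4]):
        log = open('{}/{}.txt'.format(log_dir, fname.rsplit('/', 1)[-1]), 'w')
        cmd = ['taskset', '-c', str(core), 'make_nn_images',
               '-c', config_file, fname, detx_filepath]
        procs.append((subprocess.Popen(cmd, stdout=log), log))
    for proc, log in procs:   # equivalent of the script's 'wait'
        proc.wait()
        log.close()
```
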
diff --git a/orcasong/tests/timecut_test/__init__.py b/utilities/__init__.py
similarity index 100%
rename from orcasong/tests/timecut_test/__init__.py
rename to utilities/__init__.py
diff --git a/utilities/evaluate_generator_IO_speed.py b/utilities/evaluate_generator_IO_speed.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c9859bec981598e41e484264237ed7d5d54e5d3
--- /dev/null
+++ b/utilities/evaluate_generator_IO_speed.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""Code for testing the readout speed of orcasong .hdf5 files."""
+
+import numpy as np
+import h5py
+import timeit
+import cProfile
+
+def generate_batches_from_hdf5_file():
+    # 4d
+    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_9_xyzt_no_compression_chunked.h5' # 4D, (11x13x18x50)), no compression. chunksize=32 --> 1011 ms
+    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_9_xyzt_lzf.h5' # 4D, (11x13x18x50), lzf --> 2194 ms
+    #filepath = 'JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_9_xyzt_gzip_1.h5' # 4D, (11x13x18x50), gzip, compression_opts=1 --> 1655 ms
+
+    # With new km3pipe structure
+    filepath = '/home/woody/capn/mppi033h/orcasong_output/4dTo4d/xyzc/JTE_ph_ph_mupage_ph_ph_ph_ORCA115_9m_2016_9_xyzc.h5'
+
+    print('Testing generator on file ' + filepath)
+    batchsize = 32
+    dimensions = (batchsize, 11, 13, 18, 31) # 4D
+
+    f = h5py.File(filepath, "r")
+    filesize = len(f['y'])
+    print(filesize)
+
+    n_entries = 0
+    while n_entries < (filesize - batchsize):
+        xs = f['x'][n_entries : n_entries + batchsize]
+        xs = np.reshape(xs, dimensions).astype(np.float32)
+
+        y_values = f['y'][n_entries:n_entries+batchsize]
+        ys = y_values[['run_id', 'event_id']]
+
+        n_entries += batchsize
+        yield (xs, ys)
+    f.close()
+
+
+number = 20
+#t = timeit.timeit(generate_batches_from_hdf5_file, number = number)
+#t = timeit.Timer(stmt="list(generate_batches_from_hdf5_file())", setup="from __main__ import generate_batches_from_hdf5_file")
+#print t.timeit(number) / number
+#print str(number) + 'loops, on average ' + str(t.timeit(number) / number *1000) + 'ms'
+
+pr = cProfile.Profile()
+pr.enable()
+
+t = timeit.Timer(stmt="list(generate_batches_from_hdf5_file())", setup="from __main__ import generate_batches_from_hdf5_file")
+print(str(number) + 'loops, on average ' + str(t.timeit(number) / number *1000) + 'ms')
+
+pr.disable()
+
+pr.print_stats(sort='time')
\ No newline at end of file
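
Review note on the re-added speed test: `f.close()` sits after the `while` loop inside the generator, so it only runs if the consumer exhausts the generator; if iteration stops early, the HDF5 file stays open. A hedged variant (a sketch with the same field names and shapes as above, not a drop-in replacement) that uses a `with` block so the file is closed in either case:

```python
import h5py
import numpy as np

def generate_batches(filepath, batchsize=32, dimensions=(32, 11, 13, 18, 31)):
    """Yield (xs, ys) batches; the file is closed even if iteration stops early."""
    with h5py.File(filepath, 'r') as f:
        filesize = len(f['y'])
        n_entries = 0
        while n_entries < (filesize - batchsize):
            xs = np.reshape(f['x'][n_entries:n_entries + batchsize],
                            dimensions).astype(np.float32)
            # 'y' is a structured array; pick the named columns directly
            ys = f['y'][n_entries:n_entries + batchsize][['run_id', 'event_id']]
            n_entries += batchsize
            yield xs, ys
```
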
diff --git a/utilities/timecut_test/__init__.py b/utilities/timecut_test/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/orcasong/tests/timecut_test/plots/elec-CC/.gitkeep b/utilities/timecut_test/plots/elec-CC/.gitkeep
similarity index 100%
rename from orcasong/tests/timecut_test/plots/elec-CC/.gitkeep
rename to utilities/timecut_test/plots/elec-CC/.gitkeep
diff --git a/orcasong/tests/timecut_test/plots/muon-CC/.gitkeep b/utilities/timecut_test/plots/muon-CC/.gitkeep
similarity index 100%
rename from orcasong/tests/timecut_test/plots/muon-CC/.gitkeep
rename to utilities/timecut_test/plots/muon-CC/.gitkeep
diff --git a/orcasong/tests/timecut_test/plots/mupage/.gitkeep b/utilities/timecut_test/plots/mupage/.gitkeep
similarity index 100%
rename from orcasong/tests/timecut_test/plots/mupage/.gitkeep
rename to utilities/timecut_test/plots/mupage/.gitkeep
diff --git a/orcasong/tests/timecut_test/plots/random_noise/.gitkeep b/utilities/timecut_test/plots/random_noise/.gitkeep
similarity index 100%
rename from orcasong/tests/timecut_test/plots/random_noise/.gitkeep
rename to utilities/timecut_test/plots/random_noise/.gitkeep
diff --git a/orcasong/tests/timecut_test/plots/tau-CC/.gitkeep b/utilities/timecut_test/plots/tau-CC/.gitkeep
similarity index 100%
rename from orcasong/tests/timecut_test/plots/tau-CC/.gitkeep
rename to utilities/timecut_test/plots/tau-CC/.gitkeep
diff --git a/orcasong/tests/timecut_test/timecut_test.py b/utilities/timecut_test/timecut_test.py
similarity index 100%
rename from orcasong/tests/timecut_test/timecut_test.py
rename to utilities/timecut_test/timecut_test.py