Commit 710c28bc authored by Danniene Wete

Implement the sensory-word (SW) creation approach that uses each codeword as an SW.

parent 69a9f663
......@@ -2,16 +2,16 @@
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(10299, 3, 128)"
"(7352, 3, 128)"
]
},
"execution_count": 2,
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
......@@ -24,29 +24,23 @@
"\n",
"#Load train data\n",
"\n",
"trainAcc_x = np.loadtxt('../../data/total_acc_x_train.txt') # (7352, 128)\n",
"testAcc_x = np.loadtxt('../../data/total_acc_x_test.txt') # (2947, 128)\n",
"acc_x = np.concatenate((trainAcc_x, testAcc_x), axis=0) # (10299, 128)\n",
"\n",
"trainAcc_y = np.loadtxt('../../data/total_acc_y_train.txt')\n",
"testAcc_y = np.loadtxt('../../data/total_acc_y_test.txt')\n",
"acc_y = np.concatenate((trainAcc_y, testAcc_y), axis=0)\n",
"acc_x = np.loadtxt('../../data/total_acc_x_train.txt')\n",
"acc_y = np.loadtxt('../../data/total_acc_y_train.txt')\n",
"acc_z = np.loadtxt('../../data/total_acc_z_train.txt')\n",
"\n",
"trainAcc_z = np.loadtxt('../../data/total_acc_z_train.txt')\n",
"testAcc_z = np.loadtxt('../../data/total_acc_y_test.txt')\n",
"acc_z = np.concatenate((trainAcc_z, testAcc_z), axis=0)\n",
"gyro_x = np.loadtxt('../../data/body_gyro_x_train.txt')\n",
"gyro_y = np.loadtxt('../../data/body_gyro_y_train.txt')\n",
"gyro_z = np.loadtxt('../../data/body_gyro_z_train.txt')\n",
"\n",
"trainGyr_x = np.loadtxt('../../data/body_gyro_x_train.txt')\n",
"testGyr_x = np.loadtxt('../../data/body_gyro_x_test.txt')\n",
"gyro_x = np.concatenate((trainGyr_x, testGyr_x), axis=0)\n",
"#Load raw test data\n",
"\n",
"trainGyr_y = np.loadtxt('../../data/body_gyro_y_train.txt')\n",
"testGyr_y = np.loadtxt('../../data/body_gyro_y_test.txt')\n",
"gyro_y = np.concatenate((trainGyr_y, testGyr_y), axis=0)\n",
"acc_xtest = np.loadtxt('../../data/total_acc_x_test.txt')\n",
"acc_ytest = np.loadtxt('../../data/total_acc_y_test.txt')\n",
"acc_ztest = np.loadtxt('../../data/total_acc_z_test.txt')\n",
"gyro_xtest = np.loadtxt('../../data/body_gyro_x_test.txt')\n",
"gyro_ytest = np.loadtxt('../../data/body_gyro_y_test.txt')\n",
"gyro_ztest = np.loadtxt('../../data/body_gyro_z_test.txt')\n",
"\n",
"trainGyr_z = np.loadtxt('../../data/body_gyro_z_train.txt')\n",
"testGyr_z = np.loadtxt('../../data/body_gyro_z_test.txt')\n",
"gyro_z = np.concatenate((trainGyr_z, testGyr_z), axis=0)\n",
"\n",
"\n",
"# Combine all 3 channels data to form one 3D matrix of data\n",
......@@ -61,6 +55,9 @@
"trainAcc = combineData(acc_x, acc_y, acc_z)\n",
"trainGyr = combineData(gyro_x, gyro_y, gyro_z)\n",
"\n",
"testAcc = combineData(acc_xtest, acc_ytest, acc_ztest)\n",
"testGyr = combineData(gyro_xtest, gyro_ytest, gyro_ztest)\n",
"\n",
"trainAcc.shape"
]
},
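`combineData` itself is defined in a cell outside this hunk. A minimal sketch consistent with the `(7352, 3, 128)` result above, assuming it simply stacks the three axis matrices along a new channel dimension (this is a reconstruction, not the author's verbatim code):

```python
import numpy as np

def combineData(x, y, z):
    # Hypothetical reconstruction: stack three (n_series, 128) axis
    # matrices along a new channel axis, giving (n_series, 3, 128).
    return np.stack((x, y, z), axis=1)
```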
......@@ -185,36 +182,23 @@
" \n",
" bow = []\n",
" for i in range(0, acc.shape[1]):\n",
" words = []\n",
" \n",
" x1x2_words = list(zip(x_acc[i], x_gyr[i]))\n",
" words.extend([(str('xx')+(a+b)) for (a,b) in x1x2_words])\n",
" \n",
" x1y2_words = list(zip(x_acc[i], y_gyr[i]))\n",
" words.extend([(str('xy')+(a+b)) for (a, b) in x1y2_words])\n",
" \n",
" x1z2_words = list(zip(x_acc[i], z_gyr[i]))\n",
" words.extend([(str('xz')+(a+b)) for (a, b) in x1z2_words])\n",
" \n",
" #y1x2_words = list(zip(y_acc[i], x_gyr[i]))\n",
" #words.extend([(str('yz')+(a+b)) for (a, b) in y1x2_words]) \n",
" \n",
" \n",
" y1y2_words = list(zip(y_acc[i], y_gyr[i]))\n",
" words.extend([(str('yy')+(a+b)) for (a, b) in y1y2_words])\n",
" \n",
" y1z2_words = list(zip(y_acc[i], z_gyr[i]))\n",
" words.extend([(str('yz')+(a+b)) for (a, b) in y1z2_words]) \n",
" #create SW for the accelerometer sensor\n",
" acc_xwords = ['ax'+k for k in x_acc[i]]\n",
" acc_ywords = ['ay'+k for k in y_acc[i]]\n",
" acc_zwords = ['az'+k for k in z_acc[i]] \n",
" acc_words = np.hstack((acc_xwords, acc_ywords, acc_zwords))\n",
" \n",
" #z1x2_words = list(zip(z_acc[i], x_gyr[i]))\n",
" #words.extend([(str('zx')+(a+b)) for (a, b) in z1x2_words])\n",
" \n",
" #z1y2_words = list(zip(z_acc[i], y_gyr[i]))\n",
" #words.extend([(str('zy')+(a+b)) for (a, b) in z1y2_words])\n",
" #create SW for gyroscope sensor\n",
" gyr_xwords = ['gx'+k for k in x_gyr[i]]\n",
" gyr_ywords = ['gy'+k for k in y_gyr[i]]\n",
" gyr_zwords = ['gz'+k for k in x_gyr[i]] \n",
" gyr_words = np.hstack((gyr_xwords, gyr_ywords, gyr_zwords))\n",
" \n",
" \n",
" z1z2_words = list(zip(z_acc[i], z_gyr[i]))\n",
" words.extend([(str('zz')+(a+b)) for (a, b) in z1z2_words])\n",
" #merge accelerometer and gyroscope words\n",
" words = np.hstack((acc_words, gyr_words)) \n",
" bow.append(words)\n",
" \n",
" return np.array(bow) "
......@@ -240,11 +224,20 @@
" overlap_length = window_length // 2 \n",
" \n",
" trainAcc_window = sliding_window_approach(trainAcc,window_length,overlap_length)\n",
" trainGyr_window = sliding_window_approach(trainGyr,window_length,overlap_length) \n",
" trainGyr_window = sliding_window_approach(trainGyr,window_length,overlap_length) \n",
" \n",
" testAcc_window = sliding_window_approach(testAcc,window_length,overlap_length)\n",
" testGyr_window = sliding_window_approach(testGyr,window_length,overlap_length)\n",
" \n",
" #2. Clustering\n",
" trainAcc_centroids = calc_centroids_array(trainAcc_window, n_cluster)\n",
" trainGyr_centroids = calc_centroids_array(trainGyr_window, n_cluster)\n",
" # Save centroids on disk for using later\n",
" with open('trainTestCorpus_m1/trainAcc.centroids', 'wb') as fp:\n",
" pickle.dump(trainAcc_centroids, fp)\n",
" with open('trainTestCorpus_m1/trainGyr.centroids', 'wb') as fp: \n",
" pickle.dump(trainGyr_centroids, fp)\n",
" \n",
" \n",
" \n",
" #3. Map centroids to characters\n",
......@@ -255,28 +248,26 @@
" \n",
" trainAcc_count = trainAcc.shape[0]\n",
" trainGyr_count = trainGyr.shape[0]\n",
" testAcc_count = testAcc.shape[0]\n",
" testGyr_count = testGyr.shape[0] \n",
" \n",
" \n",
" #4 Assign subsequences to cluster centre and replace subsequence with alphabet of cluster centre\n",
" trainAcc_charsSeq = mapCodewordsToChars(trainAcc_window, trainAcc_centroids,trainAcc_count)\n",
" trainGyr_charsSeq = mapCodewordsToChars(trainGyr_window, trainGyr_centroids,trainGyr_count) \n",
" trainGyr_charsSeq = mapCodewordsToChars(trainGyr_window, trainGyr_centroids,trainGyr_count) \n",
" testAcc_charsSeq = mapCodewordsToChars(testAcc_window, trainAcc_centroids, testAcc_count)\n",
" testGyr_charsSeq = mapCodewordsToChars(testGyr_window, trainGyr_centroids, testGyr_count)\n",
" \n",
" \n",
" \n",
" #5. Create train and test bag of words\n",
" trainBow = create_words(trainAcc_charsSeq, trainGyr_charsSeq)\n",
" testBow = create_words(testAcc_charsSeq, testGyr_charsSeq) \n",
" \n",
" \n",
" \n",
" return trainBow"
" return trainBow, testBow"
]
},
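`sliding_window_approach`, `calc_centroids_array` and `mapCodewordsToChars` are defined in cells outside these hunks. One plausible reading of the pipeline, sketched under the assumptions that windows are cut per channel with step `window_length - overlap_length`, that the codebook is a k-means fit, and that each subsequence is replaced by the letter of its nearest centroid (shapes and internals are guesses, not the author's verbatim code):

```python
import numpy as np
from sklearn.cluster import KMeans

def sliding_window_approach(data, window_length, overlap_length):
    # Cut every channel of every (3, 128) series into overlapping
    # subsequences; with overlap = window_length // 2 the step is
    # window_length - overlap_length.
    step = window_length - overlap_length
    windows = []
    for series in data:                      # series: (3, 128)
        for channel in series:               # channel: (128,)
            for start in range(0, len(channel) - window_length + 1, step):
                windows.append(channel[start:start + window_length])
    return np.array(windows)

def calc_centroids_array(windows, n_cluster):
    # Learn the codebook: one centroid per codeword.
    return KMeans(n_clusters=n_cluster).fit(windows).cluster_centers_

def mapCodewordsToChars(windows, centroids, n_series):
    # Assign each subsequence to its nearest centroid and replace it
    # with that centroid's character, then regroup the flat character
    # stream by series. (Single letters suffice while n_cluster <= 26;
    # larger codebooks would need a bigger symbol set.)
    dists = np.linalg.norm(windows[:, None, :] - centroids[None, :, :], axis=2)
    chars = np.array([chr(ord('a') + i) for i in dists.argmin(axis=1)])
    return chars.reshape(n_series, -1)
```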
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
......
......@@ -6,7 +6,7 @@
"metadata": {},
"outputs": [],
"source": [
"%run createSensoryWords_fs4.ipynb\n",
"%run createSensoryWords.ipynb\n",
"\n",
"import numpy as np\n",
"import pickle\n",
......@@ -37,10 +37,10 @@
"outputs": [],
"source": [
"# serialize a train_docs to disk for later use\n",
"with open('trainTestCorpus_m2c/corpus.train', 'wb') as fp:\n",
"with open('trainTestCorpus_m1/corpus.train', 'wb') as fp:\n",
" pickle.dump(train_docs, fp)\n",
"\n",
"with open('trainTestCorpus_m2c/corpus.test', 'wb') as fp:\n",
"with open('trainTestCorpus_m1/corpus.test', 'wb') as fp:\n",
" pickle.dump(test_docs, fp)"
]
},
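The pickled corpora can then be restored in a later session with the matching `pickle.load` calls (a minimal sketch, assuming the same working directory):

```python
import pickle

with open('trainTestCorpus_m1/corpus.train', 'rb') as fp:
    train_docs = pickle.load(fp)
with open('trainTestCorpus_m1/corpus.test', 'rb') as fp:
    test_docs = pickle.load(fp)
```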
......@@ -220,7 +220,7 @@
" acc = accuracy_score(y_train, pred_labels)\n",
" p = [rdst, acc]\n",
" \n",
" with open('randomSeed_scors_m2cNew.txt', 'a') as file: # save output in file \n",
" with open('randomSeed_scors_m1.txt', 'a') as file: # save output in file \n",
" s = ['[', ']', ',']\n",
" p = str(list(p))\n",
" for e in s:\n",
......
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"scores = np.loadtxt('paramtuning_scores0.txt')"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"537"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"np.argmax(scores[:,3])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"data = np.loadtxt('randomSeed_scors_m1.txt')"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0.8215451577801959"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"np.max(data[:, 1])"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"725"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"np.argmax(data[:, 1])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
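`np.argmax` only returns a row index; to see which hyper-parameters and seed produced the optimum, the full rows can be looked up (a sketch, assuming the column layouts `[window_length, n_clusters, seed, accuracy]` and `[seed, accuracy]` used when the files were written):

```python
import numpy as np

scores = np.loadtxt('paramtuning_scores0.txt')  # window_length, n_clusters, seed, accuracy
data = np.loadtxt('randomSeed_scors_m1.txt')    # seed, accuracy

print(scores[np.argmax(scores[:, 3])])  # best sweep row (index 537)
print(data[np.argmax(data[:, 1])])      # best seed row (index 725, acc ~0.8215)
```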
......@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 1,
"metadata": {},
"outputs": [
{
......@@ -11,7 +11,7 @@
"(7352, 3, 128)"
]
},
"execution_count": 4,
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
......@@ -55,12 +55,15 @@
"trainAcc = combineData(acc_x, acc_y, acc_z)\n",
"trainGyr = combineData(gyro_x, gyro_y, gyro_z)\n",
"\n",
"testAcc = combineData(acc_xtest, acc_ytest, acc_ztest)\n",
"testGyr = combineData(gyro_xtest, gyro_ytest, gyro_ztest)\n",
"\n",
"trainAcc.shape"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
......@@ -181,12 +184,18 @@
" for i in range(0, acc.shape[1]):\n",
" \n",
" #create SW for the accelerometer sensor\n",
" acc_words = np.hstack((x_acc[i], y_acc[i], z_acc[i]))\n",
" acc_words = ['acc'+w for w in acc_words]\n",
" #\n",
" acc_xwords = ['ax'+k for k in x_acc[i]]\n",
" acc_ywords = ['ay'+k for k in y_acc[i]]\n",
" acc_zwords = ['az'+k for k in z_acc[i]] \n",
" acc_words = np.hstack((acc_xwords, acc_ywords, acc_zwords))\n",
" \n",
" \n",
" #create SW for gyroscope sensor\n",
" gyr_words = np.hstack((x_gyr[i], y_gyr[i], z_gyr[i]))\n",
" gyr_words = ['gyr'+w for w in gyr_words]\n",
" gyr_xwords = ['gx'+k for k in x_gyr[i]]\n",
" gyr_ywords = ['gy'+k for k in y_gyr[i]]\n",
" gyr_zwords = ['gz'+k for k in x_gyr[i]] \n",
" gyr_words = np.hstack((gyr_xwords, gyr_ywords, gyr_zwords))\n",
" \n",
" \n",
" #merge accelerometer and gyroscope words\n",
" words = np.hstack((acc_words, gyr_words)) \n",
......@@ -197,7 +206,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
......@@ -215,11 +224,20 @@
" overlap_length = window_length // 2 \n",
" \n",
" trainAcc_window = sliding_window_approach(trainAcc,window_length,overlap_length)\n",
" trainGyr_window = sliding_window_approach(trainGyr,window_length,overlap_length) \n",
" trainGyr_window = sliding_window_approach(trainGyr,window_length,overlap_length) \n",
" \n",
" testAcc_window = sliding_window_approach(testAcc,window_length,overlap_length)\n",
" testGyr_window = sliding_window_approach(testGyr,window_length,overlap_length)\n",
" \n",
" #2. Clustering\n",
" trainAcc_centroids = calc_centroids_array(trainAcc_window, n_cluster)\n",
" trainGyr_centroids = calc_centroids_array(trainGyr_window, n_cluster)\n",
" # Save centroids on disk for using later\n",
" with open('trainTestCorpus_m1/trainAcc.centroids', 'wb') as fp:\n",
" pickle.dump(trainAcc_centroids, fp)\n",
" with open('trainTestCorpus_m1/trainGyr.centroids', 'wb') as fp: \n",
" pickle.dump(trainGyr_centroids, fp)\n",
" \n",
" \n",
" \n",
" #3. Map centroids to characters\n",
......@@ -230,20 +248,25 @@
" \n",
" trainAcc_count = trainAcc.shape[0]\n",
" trainGyr_count = trainGyr.shape[0]\n",
" testAcc_count = testAcc.shape[0]\n",
" testGyr_count = testGyr.shape[0] \n",
" \n",
" \n",
" #4 Assign subsequences to cluster centre and replace subsequence with alphabet of cluster centre\n",
" trainAcc_charsSeq = mapCodewordsToChars(trainAcc_window, trainAcc_centroids,trainAcc_count)\n",
" trainGyr_charsSeq = mapCodewordsToChars(trainGyr_window, trainGyr_centroids,trainGyr_count) \n",
" trainGyr_charsSeq = mapCodewordsToChars(trainGyr_window, trainGyr_centroids,trainGyr_count) \n",
" testAcc_charsSeq = mapCodewordsToChars(testAcc_window, trainAcc_centroids, testAcc_count)\n",
" testGyr_charsSeq = mapCodewordsToChars(testGyr_window, trainGyr_centroids, testGyr_count)\n",
" \n",
" \n",
" \n",
" #5. Create train and test bag of words\n",
" trainBow = create_words(trainAcc_charsSeq, trainGyr_charsSeq)\n",
" testBow = create_words(testAcc_charsSeq, testGyr_charsSeq) \n",
" \n",
" \n",
" \n",
" return trainBow"
" return trainBow, testBow"
]
}
],
......
......@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
......@@ -113,14 +113,14 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0 1 2 3 4 5]\n"
"here you are [0 1 2 3 4 5]\n"
]
}
],
......@@ -136,17 +136,25 @@
"for i in list(set(true_labels)):\n",
" samples_per_class[i] = np.where((true_labels==i))[0]\n",
"\n",
"print(np.unique(true_labels))"
"print('here you are', np.unique(true_labels))"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"FINISH!\n"
]
}
],
"source": [
"for window_length in range(0, 40, 5):\n",
" for n_clusters in range(0, 50, 3): \n",
"for window_length in range(5, 40, 5):\n",
" for n_clusters in range(5, 30, 3): \n",
" train_docs = codebook_approach(window_length, n_clusters) \n",
" id2word = corpora.Dictionary(train_docs)\n",
" corpus = [id2word.doc2bow(doc) for doc in train_docs] \n",
......@@ -162,7 +170,7 @@
" acc = accuracy_score(true_labels, pred_labels)\n",
" p = [window_length, n_clusters, rdst, acc]\n",
" #print('%d, %d, %.4f, %.4f' % (window_length, n_clusters, idftreshold, acc))\n",
" with open('scoresAugCorpus.txt', 'a') as file: # save output in file \n",
" with open('paramtuning_scores0.txt', 'a') as file: # save output in file \n",
" s = ['[', ']', ',']\n",
" p = str(list(p))\n",
" for e in s:\n",
......@@ -175,9 +183,20 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"(7352, 36)"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train_docs.shape"
]
......
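The hunk at `@@ -162,7 +170,7 @@` elides the model-fitting step between `doc2bow` and `accuracy_score`. A hypothetical reconstruction, assuming a gensim LDA topic model whose dominant topic per document serves as the predicted label (the elided code must also align topic indices with activity labels before scoring; that mapping is not shown here):

```python
import numpy as np
from gensim.models import LdaModel
from sklearn.metrics import accuracy_score

# Fit LDA on the bag-of-words corpus; one topic per activity class.
lda = LdaModel(corpus=corpus, id2word=id2word, num_topics=6, random_state=rdst)

# Dominant topic of each document as its predicted label.
pred_labels = np.array([max(lda.get_document_topics(bow), key=lambda t: t[1])[0]
                        for bow in corpus])
acc = accuracy_score(true_labels, pred_labels)
```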
5 5 0 0.5750816104461371
5 5 1 0.6386017410228509
5 5 2 0.551006528835691
5 5 3 0.5841947769314473
5 5 4 0.5622959738846572
5 5 5 0.6445865070729053
5 5 6 0.49088683351468987
5 5 7 0.5567192600652884
5 5 8 0.6225516866158868
5 5 9 0.610854189336235
5 8 0 0.5977965179542981
5 8 1 0.39050598476605003
5 8 2 0.4549782372143634
5 8 3 0.47551686615886835
5 8 4 0.5597116430903155
......@@ -6,7 +6,7 @@
"metadata": {},
"outputs": [],
"source": [
"%run createSensoryWords_fs4.ipynb\n",
"%run createSensoryWords.ipynb\n",
"\n",
"import numpy as np\n",
"import pickle\n",
......@@ -27,7 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"train_docs, test_docs = codebook_approach(35, 23) # best params: 35, 23"
"train_docs, test_docs = codebook_approach(30, 29) # best params: 30, 29"
]
},
{
......@@ -37,10 +37,10 @@
"outputs": [],
"source": [
"# serialize a train_docs to disk for later use\n",
"with open('trainTestCorpus_m2c/corpus.train', 'wb') as fp:\n",
"with open('trainTestCorpus_m1/corpus.train', 'wb') as fp:\n",
" pickle.dump(train_docs, fp)\n",
"\n",
"with open('trainTestCorpus_m2c/corpus.test', 'wb') as fp:\n",
"with open('trainTestCorpus_m1/corpus.test', 'wb') as fp:\n",
" pickle.dump(test_docs, fp)"
]
},
......@@ -52,7 +52,7 @@
{
"data": {
"text/plain": [
"(7352, 36)"
"(7352, 42)"
]
},
"execution_count": 4,
......@@ -220,7 +220,7 @@
" acc = accuracy_score(y_train, pred_labels)\n",
" p = [rdst, acc]\n",
" \n",
" with open('randomSeed_scors_m2cNew.txt', 'a') as file: # save output in file \n",
" with open('randomSeed_scors_m1.txt', 'a') as file: # save output in file \n",
" s = ['[', ']', ',']\n",
" p = str(list(p))\n",
" for e in s:\n",
......
5 5 0 0.5350924918389554
5 5 1 0.5327801958650707
5 5 2 0.6041893362350381
5 5 3 0.41988574537540807
5 5 4 0.5028563656147987
5 5 5 0.530467899891186
5 5 6 0.5703210010881393
5 5 7 0.5613438520130577
5 5 8 0.5633841131664853
5 5 9 0.5866430903155604
5 8 0 0.4884385201305767
5 8 1 0.48667029379760607
5 8 2 0.4474972796517954
5 8 3 0.48639825897714906
5 8 4 0.4510337323177367
5 8 5 0.4736126224156692
5 8 6 0.5607997823721437
5 8 7 0.4273667029379761
5 8 8 0.4820457018498368
5 8 9 0.4812295973884657
5 11 0 0.4432807399347116
5 11 1 0.4171653971708379
5 11 2 0.4915669205658324
5 11 3 0.4692600652883569
5 11 4 0.4806855277475517
5 11 5 0.4158052230685528
5 11 6 0.5799782372143635
5 11 7 0.47170837867247006
5 11 8 0.5
5 11 9 0.41240478781284007
5 14 0 0.5442056583242655
5 14 1 0.5070729053318824
5 14 2 0.43008705114254625
5 14 3 0.4623231773667029
5 14 4 0.565424374319913
5 14 5 0.39186615886833515
5 14 6 0.419069640914037
5 14 7 0.4390642002176279
5 14 8 0.470620239390642
5 14 9 0.5035364526659413
5 17 0 0.4821817192600653
5 17 1 0.43457562568008706
5 17 2 0.4242383025027203
5 17 3 0.4809575625680087
5 17 4 0.5069368879216539
5 17 5 0.48190968443960824
5 17 6 0.4623231773667029
5 17 7 0.558487486398259
5 17 8 0.42995103373231774
5 17 9 0.4529379760609358
5 20 0 0.44518498367791076
5 20 1 0.4956474428726877
5 20 2 0.5073449401523396
5 20 3 0.5444776931447225
5 20 4 0.4438248095756257
5 20 5 0.5236670293797606
5 20 6 0.49088683351468987
5 20 7 0.5014961915125136
5 20 8 0.5624319912948857
5 20 9 0.4885745375408052
5 23 0 0.5254352557127312
5 23 1 0.49823177366702937
5 23 2 0.5711371055495104
5 23 3 0.5209466811751904
5 23 4 0.5923558215451578
5 23 5 0.5719532100108814
5 23 6 0.5912676822633297
5 23 7 0.47850924918389554
5 23 8 0.6048694232861807
5 23 9 0.5248911860718172
5 26 0 0.5038084874863983
5 26 1 0.4164853101196953
5 26 2 0.3854733405875952
5 26 3 0.404923830250272
5 26 4 0.40070729053318827
5 26 5 0.4880304678998912
5 26 6 0.463139281828074
5 26 7 0.4794613710554951
5 26 8 0.4468171926006529
5 26 9 0.463411316648531
5 29 0 0.4881664853101197
5 29 1 0.536316648531012