From b8320a8a32ca8f4321b273e63198b8c55d2951c0 Mon Sep 17 00:00:00 2001
From: "Dr. Hamed Khalili" <hamedkhalili@uni-koblenz.de>
Date: Fri, 23 Jun 2023 13:23:38 +0000
Subject: [PATCH] Upload New File

---
 p_1/code/tmux_covid_hb.txt | 368 +++++++++++++++++++++++++++++++++++++
 1 file changed, 368 insertions(+)
 create mode 100644 p_1/code/tmux_covid_hb.txt

diff --git a/p_1/code/tmux_covid_hb.txt b/p_1/code/tmux_covid_hb.txt
new file mode 100644
index 0000000..ca6fd50
--- /dev/null
+++ b/p_1/code/tmux_covid_hb.txt
@@ -0,0 +1,368 @@
+###******************************HELLO*******************************
+
+
+###***************************BAYESIAN GOVERNMENT COVID APPLICATION*************************************************WRITTEN BY:
+###********************************************************************************************************HAMED KHALILI***********
+###***************************INPUTS OF THE PROGRAM********************************************************************************************
+import pandas as pd
+
+
+azResults=[]
+Results=[]
+countries=['Netherlands','Czechia','Lithuania','Austria','Poland','Slovenia','Estonia','Italy','Slovakia','Ireland','Denmark',
+        'Iceland','Cyprus','Greece','Belgium','Bulgaria','France','Germany','Latvia','Spain','Norway','Romania','Liechtenstein',
+         'Portugal','Luxembourg','Hungary','Malta','Croatia','Finland','Sweden']#
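+# Outer loops: tage ("days") is the incidence-window length in days; massnahme ("measure")
+# is the NPI group under evaluation, each with and without its Partial variants.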
+for tage in [21]:
+    for massnahme in [['ClosDaycare','ClosDaycarePartial'],['ClosDaycare'],['ClosPrim','ClosPrimPartial'],['ClosPrim'],
+              ['RestaurantsCafes','RestaurantsCafesPartial'],['RestaurantsCafes'],['GymsSportsCentres','GymsSportsCentresPartial'],['GymsSportsCentres'],
+              ['Teleworking','TeleworkingPartial','WorkplaceClosuresPartial','WorkplaceClosures'],['Teleworking','WorkplaceClosures'],
+              ['MasksMandatoryClosedSpacesPartial','MasksMandatoryAllSpaces','MasksMandatoryClosedSpaces','MasksMandatoryAllSpacesPartial'],
+              ['MasksMandatoryAllSpaces','MasksMandatoryClosedSpaces']]:
+        incidence_days_number=tage
+        print(incidence_days_number)
+        X=massnahme
+        print(X)
+        
+
+#X=['MasksMandatoryAllSpaces','MasksMandatoryClosedSpaces']
+
+
+        method="hierarchical"#or#pooled#or#unpooled
+        ###********************************************************************************************************************************************
+        #['ClosDaycare','ClosDaycarePartial','ClosPrimPartial','ClosSecPartial','ClosPrim','ClosSec','ClosHighPartial','ClosHigh']
+        #['RestaurantsCafes','RestaurantsCafesPartial']
+        #['GymsSportsCentres','GymsSportsCentresPartial']
+        #['Teleworking','TeleworkingPartial','WorkplaceClosuresPartial','AdaptationOfWorkplace','AdaptationOfWorkplacePartial','WorkplaceClosures']
+        #['MasksMandatoryClosedSpacesPartial','MasksMandatoryAllSpaces','MasksMandatoryClosedSpaces','MasksMandatoryAllSpacesPartial']
+        #y_pred is almost identical to y_est except that we do not specify the observed data. PyMC considers
+        #it a stochastic node (as opposed to an observed node), so as the MCMC sampler runs it also draws samples for y_pred alongside y_est.
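+        # Illustrative sketch of that pattern (hypothetical names y_est/y_pred), mirroring the model below:
+        #   y_est  = pm.StudentT('y_est',  nu=nu, mu=mu, sigma=sd, observed=data)  # observed node
+        #   y_pred = pm.StudentT('y_pred', nu=nu, mu=mu, sigma=sd)                 # predictive node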
+
+        """
+        ['Netherlands','Czechia','Lithuania','Austria','Poland','Slovenia','Estonia','Italy','Slovakia','Ireland','Denmark',
+        'Iceland','Cyprus','Greece','Belgium','Bulgaria','France','Germany','Latvia','Spain','Norway','Romania','Liechtenstein',
+         'Portugal','Luxembourg','Hungary','Malta','Croatia','Finland','Sweden']
+
+        ['EntertainmentVenuesPartial','RestaurantsCafesPartial','EntertainmentVenues','MassGatherAll','ClosSec','GymsSportsCentresPartial','ClosPrim',
+         'NonEssentialShopsPartial','ClosPubAnyPartial','RestaurantsCafes','GymsSportsCentres','MassGather50','PrivateGatheringRestrictions',
+         'MassGatherAllPartial',
+         'ClosHigh','NonEssentialShops','ClosSecPartial','OutdoorOver500','ClosDaycare','BanOnAllEvents','IndoorOver500','QuarantineForInternationalTravellers',
+         'ClosHighPartial','IndoorOver100','Teleworking','ClosPubAny','PlaceOfWorshipPartial','MasksMandatoryClosedSpacesPartial','MassGather50Partial',
+         'StayHomeOrderPartial','OutdoorOver100','IndoorOver50','ClosPrimPartial','PrivateGatheringRestrictionsPartial','MasksMandatoryClosedSpaces',
+         'OutdoorOver1000','TeleworkingPartial','MasksMandatoryAllSpaces','OutdoorOver50','StayHomeOrder','QuarantineForInternationalTravellersPartial',
+         'MasksMandatoryAllSpacesPartial','StayHomeGen','PlaceOfWorship','ClosDaycarePartial','IndoorOver1000','BanOnAllEventsPartial',
+         'HotelsOtherAccommodationPartial',
+         'StayHomeRiskG','ClosureOfPublicTransportPartial','AdaptationOfWorkplace','HotelsOtherAccommodation','MasksVoluntaryClosedSpacesPartial',
+         'RegionalStayHomeOrderPartial','AdaptationOfWorkplacePartial','MasksVoluntaryAllSpaces','MasksVoluntaryAllSpacesPartial','MasksVoluntaryClosedSpaces',
+         'SocialCircle','WorkplaceClosures','RegionalStayHomeOrder','ClosureOfPublicTransport','StayHomeGenPartial','WorkplaceClosuresPartial',
+         'StayHomeRiskGPartial','SocialCirclePartial']
+
+
+         """
+        ###************MAIN BODY OF THE PROGRAM****************************************************************************************************
+
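+        # add_elemant: scan lis (rows of [label, count, value] built by mean()/std()) for the
+        # first row containing `element` and return that row's j-th field (None if absent).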
+        def add_elemant(element,lis,j):
+                for i in lis:
+                    if element in i:
+                        return i[j]
+
+
+        colors = ['#348ABD', '#A60628', '#7A68A6', '#467821', '#D55E00', '#CC79A7', '#56B4E9', '#009E73', '#F0E442', '#0072B2']
+        import datetime
+        from datetime import timedelta
+        import warnings
+        warnings.filterwarnings("ignore", category=FutureWarning)
+        import seaborn as sns
+        import arviz as az
+        import itertools
+        import matplotlib.pyplot as plt
+        import numpy as np
+        import pymc3 as pm
+        import scipy
+        import scipy.stats as stats
+        from IPython.display import Image, display
+        from sklearn import preprocessing
+
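+        # f(r, tr): return "+" if any measure name in tr appears in the day's Rescd list r, else "-".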
+        def f(r,tr):
+                    for i in tr:
+
+                        if i in r:
+                            return "+"#+str(tr).replace(",", " or")
+
+                    return "-"#+str(tr).replace(",", " or")
+        def std():
+            l=[]
+            for idx, name in (enumerate(CBook.iloc[:,5].value_counts().index.tolist())):
+                    l.append( [name,CBook.iloc[:,5].value_counts().tolist()[idx],CBook.loc[CBook.iloc[:,5] == name, mass].std()])
+            return l#CBook[str(evaluation_object)+' '+str(evaluation_criteria)+' '+'std'] = CBook.apply(lambda row: add_second_elemant(row[evaluation_object],l), axis=1)
+
+        def mean():
+            l=[]
+            for idx, name in (enumerate(CBook.iloc[:,5].value_counts().index.tolist())):
+                    l.append( [name,CBook.iloc[:,5].value_counts().tolist()[idx],CBook.loc[CBook.iloc[:,5] == name, mass].mean()])
+            return l
+        cb =pd.read_excel("cb.xlsx")
+        measures =pd.read_excel("measures.xlsx")
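+        # Inputs: cb.xlsx holds daily reported cases per country (countriesAndTerritories, dateRep, cases);
+        # measures.xlsx holds the response measures with start (A) and end (E) dates per Country.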
+
+
+
+
+
+        #if method=="hierarchical":
+
+
+
+            #liss=[]
+
+
+        responses_plus=[]
+        responses_minus=[]
+        liss_plus = pd.DataFrame({'country':[],'mean':[],'std':[],'count+':[],'count-':[],'mean+':[],'mean-':[],'std+':[],'std-':[]})
+        for idx, name in (enumerate(cb['countriesAndTerritories'].value_counts().index.tolist())):
+        #for name in['Germany']:
+            land=name
+            cb_2=cb.loc[(cb['countriesAndTerritories'] == land)]
+            cb_2=cb_2[['dateRep','cases']].iloc[::-1]
+            #cb=cb[(cb['dateRep']>=datetime.date(2020,2,3))] # df[(df['date']>datetime.date(2016,1,1))
+            cb_2['dateRep'] = pd.to_datetime(cb_2['dateRep'], format='%d/%m/%Y')
+            #cb_2=cb_2.loc[cb_2['dateRep']>='2020-03-02']
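+            # Clean the case series: flip negative corrections to positive, replace zero
+            # counts with 1 (keeps the later ratio finite), and interpolate missing values.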
+            cb_2['cases'].values[cb_2['cases']<0] = cb_2['cases'].values[cb_2['cases']<0]*(-1)
+            cb_2['cases'].values[cb_2['cases']==0]=1
+            if cb_2.isnull().values.any():
+                cb_2['cases']=cb_2['cases'].interpolate()
+
+            cb_2=cb_2.reset_index()
+            measures_2=measures.loc[(measures['Country'] == land)]
+            if measures_2.isnull().values.any():
+                #print(measures_2[measures_2['E'].isna()])
+                measures_2=measures_2.dropna()
+            measures_2=measures_2.reset_index()
+            cooBook=cb_2
+            lst=[]
+            cooBook["Rescd"] = [list() for x in range(len(cooBook.index))]
+            measures_2['A'] = pd.to_datetime(measures_2['A'], format='%d/%m/%Y')
+            measures_2['E'] = pd.to_datetime(measures_2['E'], format='%d/%m/%Y')
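+            # Tag each day with every measure in force: a day gets measure i if A <= date <= E
+            # (the recorded end date E is shifted forward by one day before the comparison).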
+            for i in range (0,len(measures_2)):
+                #col="rc"#(measures['Measure'][i])
+                A=(measures_2['A'][i])
+                E=(measures_2['E'][i] + timedelta(days=1))
+                #if col not in coBook.columns:
+                    #coBook[col] = pd.Series(dtype='int')
+
+                for j in range (0,len(cooBook)): 
+                    date=cooBook['dateRep'][j]
+                    #date_E=coBook['time_iso8601'][j-1]
+                    #if coBook[col][j] != 1:
+                    if date>=A and date<=E:
+                        cooBook["Rescd"][j].append(measures_2['Measure'][i])
+            CBook=cooBook
+
+        #CBook['daily_cases']=CBook['daily_cases'].diff()
+        #CBook=CBook[1::]
+        #CBook=CBook.reset_index()
+
+            CBook['rpn_minus_one'+str(incidence_days_number)] = pd.Series(dtype='float')
+            CBook = CBook.replace(np.nan, 0)
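+            # rpn_minus_one_N: cases on day i+N divided by the mean of the N preceding days,
+            # minus one; a relative-growth signal over the incidence window.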
+            for i in range (0,len(CBook)-incidence_days_number):
+                casesdayNplusone=CBook.loc[i+incidence_days_number, ['cases']].to_list()[0]
+                averagecasesdayonetoNminusone=sum(CBook.iloc[i:i+incidence_days_number]['cases'].values)/incidence_days_number
+                #if averagecasesdayonetoNminusone ==0:
+                    #print("00000000")
+                CBook.loc[i, ['rpn_minus_one'+str(incidence_days_number)]]=casesdayNplusone/averagecasesdayonetoNminusone-1
+
+
+            #operator="or"
+            mass='rpn_minus_one'+str(incidence_days_number)
+            #CBook[mass]=CBook[mass].pct_change()
+            #CBook=CBook.iloc[1: , :]
+            #CBook[[mass]] = CBook[[mass]].apply(lambda x: 100*x) 
+            data_collecting_start_date=min(measures_2['A'])
+            data_collecting_end_date=max(measures_2['E'])
+            if land== 'Hungary':#03/03/2020-11/06/2021 for Hungary
+                data_collecting_start_date='2020-03-11'
+                data_collecting_end_date='2021-06-11'
+            if land== 'Iceland':#23/07/2020-03/12/2021 for Iceland
+                data_collecting_start_date='2020-07-23'
+                data_collecting_end_date='2021-12-03'   
+            if land== 'Liechtenstein':# 01/10/2020-24/10/2022 for Liechtenstein 
+                data_collecting_start_date='2020-10-01'
+                data_collecting_end_date='2022-03-31'
+            
+            CBook=CBook.loc[ (CBook['dateRep']>=data_collecting_start_date)  ]
+            CBook=CBook.loc[ (CBook['dateRep']<=data_collecting_end_date)  ]
+
+
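+            # Label each day "<country>+" if any measure in X was active that day, else "<country>-".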
+            CBook[str(X[0][:10]).replace(",", " or")+''+ 'Rescd'] = CBook.apply(lambda row: land+f(row['Rescd'],X), axis=1)
+            #liss.append([land,mean(),std()])
+
+            info ={'country':land,
+                   'mean':CBook.iloc[:,4].mean(),'std':CBook.iloc[:,4].std(),
+                   'count+':add_elemant(land+'+',mean(),1),'count-':add_elemant(land+'-',mean(),1),
+                   'mean+':add_elemant(land+'+',mean(),2),'mean-':add_elemant(land+'-',mean(),2),
+                   'std+':add_elemant(land+'+',std(),2),'std-':add_elemant(land+'-',std(),2)}
+            liss_plus = pd.concat([liss_plus, pd.DataFrame([info])], ignore_index=True)
+            liss_plus=liss_plus.fillna(0)
+            if land in countries:
+
+
+                CBook_plus=CBook[(CBook.iloc[:,5] == land+"+")]
+                CBook_minus=CBook[(CBook.iloc[:,5] == land+"-")]
+                le_plus = preprocessing.LabelEncoder()
+                le_minus = preprocessing.LabelEncoder()
+                rc=str(X[0][:10]).replace(",", " or")+''+ 'Rescd'
+                clm_plus=CBook_plus[rc]
+                clm_minus=CBook_minus[rc]
+                response_idx_plus = le_plus.fit_transform(clm_plus)
+                response_idx_minus = le_minus.fit_transform(clm_minus)
+                response_plus = le_plus.classes_
+                response_minus = le_minus.classes_
+                #number_of_response_plus=len(response_plus)
+                #number_of_response_minus=len(response_minus)
+                #for i in range(0, number_of_response_codes):
+                if len(response_plus)==0:
+                    response_plus=[land+"+",[]]
+                    responses_plus.append(response_plus)
+                else:    
+                    response_plus[0]=[response_plus[0],CBook_plus[clm_plus==response_plus[0]][mass].values.tolist()]
+                    responses_plus.append(response_plus[0])
+                if len(response_minus)==0:
+                    response_minus=[land+"-",[]]
+                    responses_minus.append(response_minus)
+                else:
+                    response_minus[0]=[response_minus[0],CBook_minus[clm_minus==response_minus[0]][mass].values.tolist()]
+                    responses_minus.append(response_minus[0])
+        #liss
+
+
+
+        #export_excel = liss_plus.to_excel (r"C:\Users\Hamed\Desktop\liss_plus.xlsx")
+        data_mean_positive = np.repeat(liss_plus['mean+'].values.tolist(),liss_plus['count+'].values.tolist())
+        mean_mean_positive=data_mean_positive.mean()
+        mean_std_positive=data_mean_positive.std()
+        data_std_positive = np.repeat(liss_plus['std+'].values.tolist(),liss_plus['count+'].values.tolist())
+        #std_mean_positive=data_std_positive.mean()
+        #std_std_positive=data_std_positive.std()
+        std_min_positive=data_std_positive.min()
+        std_max_positive=data_std_positive.max()
+        data_mean_negative = np.repeat(liss_plus['mean-'].values.tolist(),liss_plus['count-'].values.tolist())
+        mean_mean_negative=data_mean_negative.mean()
+        mean_std_negative=data_mean_negative.std()
+        data_std_negative = np.repeat(liss_plus['std-'].values.tolist(),liss_plus['count-'].values.tolist())
+        #std_mean_negative=data_std_negative.mean()
+        #std_std_negative=data_std_negative.std()
+        std_min_negative=data_std_negative.min()
+        std_max_negative=data_std_negative.max()
+
+        #cb =pd.read_excel(r"C:\Users\Hamed\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Anaconda3 (64-bit)\datac.xlsx")
+        #measures =pd.read_excel(r"C:\Users\Hamed\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Anaconda3 (64-bit)\response_graphs.xlsx")
+
+
+
+        #CBook
+
+
+
+        #mean_mean_positive   mean_std_positive   std_mean_positive   std_std_positive  country_mean_positive country_std_positive
+        #uncertainty=0.1
+        #import math
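+        # Hierarchical model: country-level group means are drawn from shared Normal hyperpriors
+        # (one for days under the measure, "+", one for days without it, "-"); per-country scales
+        # get Exponential priors anchored at the country's empirical std; daily incidence is
+        # modelled with Student-T likelihoods; the incidence_pred nodes are unobserved copies
+        # used for the posterior-predictive comparison further below.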
+        with pm.Model() as model:
+            hyper_mu_parameter_positive=pm.Normal('hyper_mu_parameter_positive', mu=mean_mean_positive,sd=mean_std_positive)#
+            hyper_sd_parameter_positive=pm.Uniform('hyper_sd_parameter_positive', lower=std_min_positive,upper=std_max_positive)#pm.Exponential("hyper_sd_parameter", lam=1/std_mean_positive)#
+            #hyper_sd_error_parameter=pm.Uniform('hyper_sd_error_parameter', lower=std_min_positive,upper=std_max_positive)
+            hyper_mu_parameter_negative=pm.Normal('hyper_mu_parameter_negative', mu=mean_mean_negative,sd=mean_std_negative)#
+            hyper_sd_parameter_negative=pm.Uniform('hyper_sd_parameter_negative', lower=std_min_negative,upper=std_max_negative)
+
+            hyper_nu_parameter_plus=pm.Uniform('hyper_nu_parameter_plus', lower=0,upper=30)
+            hyper_nu_parameter_minus=pm.Uniform('hyper_nu_parameter_minus', lower=0,upper=30)
+            #phi_mean=pm.Uniform('phi_mean', lower=0,upper=1)
+            #phi_std=pm.Uniform('phi_std', lower=0,upper=1)
+            mu = dict()
+            sd=dict()
+            incidence = dict()
+            incidence_pred=dict()
+            #name_plus=responses_plus[0][0]
+            #observed_plus=responses_plus[0][1]
+
+            #name_minus=responses_minus[0][0]
+            #observed_minus=responses_minus[0][1]
+                #nu[name] = pm.Uniform('nu_'+name, lower=0,upper=30)
+            for name_plus,observed_plus in responses_plus:
+                std_land=liss_plus.loc[(liss_plus['country'] == name_plus[:-1])].iloc[:,2].to_list()[0]
+                mu[name_plus] = pm.Normal('mu_'+name_plus, mu=hyper_mu_parameter_positive,sd=hyper_sd_parameter_positive)
+                sd[name_plus] = pm.Exponential('sd_'+name_plus, lam=1/std_land)
+                #if len(observed_plus)==0:
+                    #incidence[name_plus] = pm.StudentT(name_plus,nu=hyper_nu_parameter_plus, mu=mu[name_plus], sigma=sd[name_plus]  )
+                if len(observed_plus)!=0:
+                    incidence[name_plus] = pm.StudentT(name_plus,nu=hyper_nu_parameter_plus, mu=mu[name_plus], sigma=sd[name_plus]  ,observed=observed_plus)
+                incidence_pred[name_plus] = pm.StudentT('incidence_pred'+name_plus,nu=hyper_nu_parameter_plus, mu=mu[name_plus], sigma=sd[name_plus]  )
+            for name_minus,observed_minus in responses_minus:
+                std_land=liss_plus.loc[(liss_plus['country'] == name_minus[:-1])].iloc[:,2].to_list()[0]
+                mu[name_minus] = pm.Normal('mu_'+name_minus, mu=hyper_mu_parameter_negative,sd=hyper_sd_parameter_negative)
+                sd[name_minus] = pm.Exponential('sd_'+name_minus, lam=1/std_land)
+                #if len(observed_minus)==0:
+                    #incidence[name_minus] = pm.StudentT(name_minus,nu=hyper_nu_parameter_minus, mu=mu[name_minus], sigma=sd[name_minus] )
+                if len(observed_minus)!=0:
+                    incidence[name_minus] = pm.StudentT(name_minus,nu=hyper_nu_parameter_minus, mu=mu[name_minus], sigma=sd[name_minus]  ,observed=observed_minus)
+                incidence_pred[name_minus] = pm.StudentT('incidence_pred'+name_minus,nu=hyper_nu_parameter_minus, mu=mu[name_minus], sigma=sd[name_minus]  )
+            sample_number=1000  
+
+            model_trace = pm.sample(sample_number,target_accept = 0.99)#,tune=2000,target_accept = 0.90
+            azsum=az.summary(model_trace)
+            azResults.append([incidence_days_number,X,list(azsum.index),list(azsum.columns),azsum.values.tolist()])
+
+
+
+
+
+
+
+
+
+
+
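+        # Probability of superiority: repeatedly shuffle the posterior-predictive draws for
+        # responsea ("+") and responseb ("-") and record the fraction of pairs with the "+"
+        # incidence below the "-" incidence, yielding a distribution of estimates of P(a < b).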
+        def prob_responsea_efficient_over_responseb(responsea, responseb):
+            l=[]
+            for i in range(1000):
+                a=model_trace.get_values('incidence_pred'+responsea)
+                np.random.shuffle(a)
+                b=model_trace.get_values('incidence_pred'+responseb)
+                np.random.shuffle(b)
+                l.append(float(sum(a < b))/len(a))
+            return l
+
+        #b=50
+
+        #r=[responses_plus,responses_minus]
+        #for i in range(0, len(responses_plus)):
+
+        #if len(responses_plus)==1:
+            #fig, ax = plt.subplots(1,1, figsize=(5, 5))
+            #ax.hist(prob_responsea_efficient_over_responseb(responses_plus[0][0],responses_minus[0][0]), #label="p(+<-)"+responses_plus[0]#[0][:-1])
+            #ax.legend(loc='best')
+            #ax.set_ylabel('frequency')
+            #ax.set_title('efficiency of + compared to -') 
+        from statistics import mean
+        resu=[]
+        for i in range(0,len(responses_plus)):
+            effdis=prob_responsea_efficient_over_responseb(responses_plus[i][0],responses_minus[i][0])
+            #axs[i].hist(effdis, label="p(+<-)"+responses_plus[i][0][:-1])
+            #axs[i].legend(loc='best')
+            #axs[i].set_ylabel('frequency')
+            resu.append([responses_plus[i][0][:-1],[min(effdis),mean(effdis),max(effdis)]])
+
+        Results.append([incidence_days_number,X,resu])
+                 
+score=Results
+azscore=azResults
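+# Persist the collected summaries as importable Python modules (score / azscore).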
+with open('Resultsfile21.py', 'w') as f:
+    f.write('score = %s' % score)
+with open('azResultsfile21.py', 'w') as azf:
+    azf.write('azscore = %s' % azscore)
+
+
+
+
-- 
GitLab