import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_probability as tfp

# Hyperparameters.
hidden_units = [256, 64, 32]  # hidden-layer sizes, shared by both models
learning_rate = 0.001
num_epochs = 100
sample = 100                  # number of test examples to inspect at the end

pd.set_option('display.max_columns', 500)
# Load the encoded monthly data and drop columns not used as model features.
month_data_encoded_2 = pd.read_excel(r"month_data_encoded_2.xlsx")
mde = month_data_encoded_2
#mde = mde[mde['rp_zeitraum'] < 20]
mde = mde.drop(
    ['dateRep', 'cases', 'Rescd', '7days_before_mean', '7days_after_mean',
     'week', 'year', 'index', 'month', 'countriesAndTerritories', 'va',
     'vaccin'],
    axis=1,
)
# Sort columns so the feature order below is deterministic.
mde = mde.reindex(sorted(mde.columns), axis=1)
# Target: convert to numeric and apply a log2 transform to compress its range.
mde['rp_zeitraum'] = pd.to_numeric(mde['rp_zeitraum'])
#mde['rp_zeitraum'] = mde['rp_zeitraum'].div(100)
mde['rp_zeitraum'] = np.log2(mde['rp_zeitraum'])
#mde['rp_zeitraum'].hist(bins=100)
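# Because the target is on a log2 scale from here on, any prediction p made
# by the models below is also on that scale. A minimal sketch of recovering
# the original units (assuming `p` holds a model prediction):
#
#   original_scale = 2.0 ** p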
# Build a tf.data.Dataset of (features, target) pairs. Every column except
# the target 'rp_zeitraum' is a feature; the columns are already sorted, so
# the tuple order matches the sorted model inputs created below.
feature_columns = [c for c in mde.columns if c != 'rp_zeitraum']
dataset = tf.data.Dataset.from_tensor_slices(
    (
        tuple(tf.cast(mde[name].values, tf.float32) for name in feature_columns),
        tf.cast(mde['rp_zeitraum'].values, tf.float32),
    )
)
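# Optional sanity check: each dataset element should be a tuple of per-column
# scalar features plus one scalar target. Uncomment to inspect:
#
#   print(len(dataset.element_spec[0]) == len(feature_columns))  # True
#   print(dataset.element_spec[1])  # TensorSpec(shape=(), dtype=tf.float32)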
def get_train_and_test_splits(train_size, batch_size=1):
    # The dataset is small and fits in memory, so we shuffle the training
    # split with a buffer the size of the whole split.
    train_dataset = (
        dataset.take(train_size).shuffle(buffer_size=train_size).batch(batch_size)
    )
    test_dataset = dataset.skip(train_size).batch(batch_size)
    return train_dataset, test_dataset
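# Note: take/skip splits the dataset in stored row order; only the training
# split is shuffled afterwards. A quick usage sketch:
#
#   tr, te = get_train_and_test_splits(train_size=10, batch_size=2)
#   # tr yields 5 shuffled batches of 2 elements; te yields the remaining rows.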
def run_experiment(model, loss, train_dataset, test_dataset):
    model.compile(
        optimizer=keras.optimizers.RMSprop(learning_rate=learning_rate),
        loss=loss,
        metrics=[keras.metrics.RootMeanSquaredError()],
    )
    print("Start training the model...")
    model.fit(train_dataset, epochs=num_epochs, validation_data=test_dataset)
    print("Model training finished.")
    _, rmse = model.evaluate(train_dataset, verbose=0)
    print(f"Train RMSE: {round(rmse, 3)}")
    print("Evaluating model performance...")
    _, rmse = model.evaluate(test_dataset, verbose=0)
    print(f"Test RMSE: {round(rmse, 3)}")
FEATURE_NAMES = mde.drop(columns=['rp_zeitraum']).columns.values.tolist()
def create_model_inputs():
    # One named scalar input per feature column.
    inputs = {}
    for feature_name in FEATURE_NAMES:
        inputs[feature_name] = layers.Input(
            name=feature_name, shape=(1,), dtype=tf.float32
        )
    return inputs
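# For illustration: create_model_inputs() returns a dict with one named
# scalar Keras Input per feature, which the models below concatenate into a
# single feature vector. A quick check:
#
#   assert len(create_model_inputs()) == len(FEATURE_NAMES)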
def create_baseline_model():
    inputs = create_model_inputs()
    input_values = [value for _, value in sorted(inputs.items())]
    features = keras.layers.concatenate(input_values)
    features = layers.BatchNormalization()(features)
    # Create hidden layers with deterministic weights using the Dense layer.
    for units in hidden_units:
        features = layers.Dense(units, activation="sigmoid")(features)
    # The output is deterministic: a single point estimate.
    outputs = layers.Dense(units=1)(features)
    model = keras.Model(inputs=inputs, outputs=outputs)
    return model
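# The baseline shares the architecture of the BNN below but uses ordinary
# Dense layers, so it serves as a deterministic point-estimate reference for
# the weight-uncertainty model.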
train_size = int(len(mde) * 0.75)
batch_size = 256
train_dataset, test_dataset = get_train_and_test_splits(train_size, batch_size)
# Define the prior weight distribution as a standard normal (mean=0, stddev=1).
# Note that, in this example, the prior distribution is not trainable: its
# parameters are fixed.
def prior(kernel_size, bias_size, dtype=None):
    n = kernel_size + bias_size
    prior_model = keras.Sequential(
        [
            tfp.layers.DistributionLambda(
                lambda t: tfp.distributions.MultivariateNormalDiag(
                    loc=tf.zeros(n), scale_diag=tf.ones(n)
                )
            )
        ]
    )
    return prior_model
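# Example (sketch): for a layer with 3 inputs and 2 units, kernel_size is
# 3 * 2 = 6 and bias_size is 2, so the prior is an 8-dimensional standard
# normal over the flattened weights. Uncomment to verify:
#
#   pm = prior(kernel_size=6, bias_size=2)
#   print(pm(tf.zeros((1, 1))).event_shape)  # (8,)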
# Define the variational posterior weight distribution as a multivariate
# Gaussian. Its learnable parameters are the means and the entries of the
# lower-triangular scale factor, which encode the variances and covariances.
def posterior(kernel_size, bias_size, dtype=None):
    n = kernel_size + bias_size
    posterior_model = keras.Sequential(
        [
            tfp.layers.VariableLayer(
                tfp.layers.MultivariateNormalTriL.params_size(n), dtype=dtype
            ),
            tfp.layers.MultivariateNormalTriL(n),
        ]
    )
    return posterior_model
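# Example (sketch): MultivariateNormalTriL.params_size(n) equals n means plus
# n * (n + 1) / 2 entries of the lower-triangular scale factor. For n = 8:
#
#   print(tfp.layers.MultivariateNormalTriL.params_size(8))  # 44 (= 8 + 36)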
def create_bnn_model(train_size):
    inputs = create_model_inputs()
    features = keras.layers.concatenate(list(inputs.values()))
    features = layers.BatchNormalization()(features)
    # Create hidden layers with weight uncertainty using the DenseVariational layer.
    for units in hidden_units:
        features = tfp.layers.DenseVariational(
            units=units,
            make_prior_fn=prior,
            make_posterior_fn=posterior,
            kl_weight=1 / train_size,
            activation="sigmoid",
        )(features)
    # The output layer is an ordinary Dense layer: a single point estimate.
    outputs = layers.Dense(units=1)(features)
    model = keras.Model(inputs=inputs, outputs=outputs)
    return model
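# Note on kl_weight: DenseVariational adds the KL divergence between the
# posterior and the prior to the loss on every forward pass, so it is scaled
# by 1 / train_size. Summed over the batches of one epoch, the KL term is
# then counted roughly once, the usual ELBO weighting for mini-batch training.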
mse_loss = keras.losses.MeanSquaredError()
# Here the BNN is trained on the full training split; `take(train_sample_size)`
# keeps the code ready for experiments with smaller training samples.
train_sample_size = train_size
small_train_dataset = train_dataset.unbatch().take(train_sample_size).batch(batch_size)
bnn_model = create_bnn_model(train_sample_size)
run_experiment(bnn_model, mse_loss, small_train_dataset, test_dataset)
# Draw a random batch of `sample` test examples for inspection.
examples, targets = list(
    test_dataset.unbatch().shuffle(batch_size * 10).batch(sample)
)[0]
# Train the baseline before predicting with it; a freshly created model
# would otherwise produce meaningless, untrained predictions.
baseline_model = create_baseline_model()
run_experiment(baseline_model, mse_loss, small_train_dataset, test_dataset)
predicted = baseline_model(examples).numpy()
# Collect predicted-vs-actual pairs and write them to a results file.
fl2 = []
for idx in range(sample):
    fl2.append(
        f"Predicted: {round(float(predicted[idx][0]), 3)} "
        f"- Actual: {round(float(targets[idx]), 3)}"
    )
with open('Resultsfl2.py', 'w') as f:
    f.write('fl2 = %s' % fl2)
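# Because the DenseVariational layers sample fresh weights on every forward
# pass, repeated calls to bnn_model give different predictions. A sketch of
# estimating predictive mean and epistemic uncertainty from those samples
# (not run here; 50 is an arbitrary sample count):
#
#   preds = np.stack([bnn_model(examples).numpy() for _ in range(50)])
#   pred_mean = preds.mean(axis=0)  # point estimate per example
#   pred_std = preds.std(axis=0)    # spread across weight samples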
# Save the trained BNN in TensorFlow SavedModel format.
bnn_model.save("bcnn_model_continuous")