model.py
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Concatenate, Dense, Dropout, GlobalAveragePooling1D

# Local modules providing the custom time-embedding and transformer-encoder layers
from time2vector import Time2Vector
from Transformer import TransformerEncoder
batch_size = 32  # samples per gradient update
seq_len = 128    # length of each input window (time steps)
d_k = 256        # dimension of the attention keys/queries
d_v = 256        # dimension of the attention values
n_heads = 12     # number of attention heads per encoder block
ff_dim = 256     # hidden units in each encoder's feed-forward sub-layer
def create_model():
    '''Build and compile the transformer-based regression model.'''
    # Time embedding plus three stacked transformer encoder blocks
    time_embedding = Time2Vector(seq_len)
    attn_layer1 = TransformerEncoder(d_k, d_v, n_heads, ff_dim)
    attn_layer2 = TransformerEncoder(d_k, d_v, n_heads, ff_dim)
    attn_layer3 = TransformerEncoder(d_k, d_v, n_heads, ff_dim)

    # Input: a window of seq_len time steps with 5 features each
    in_seq = Input(shape=(seq_len, 5))

    # Append the learned time embedding to the raw input features
    x = time_embedding(in_seq)
    x = Concatenate(axis=-1)([in_seq, x])

    # Self-attention: each encoder receives the same tensor as query, key and value
    x = attn_layer1((x, x, x))
    x = attn_layer2((x, x, x))
    x = attn_layer3((x, x, x))

    # Pool across the feature axis ('channels_first' averages the last dimension),
    # then regress to a single output value
    x = GlobalAveragePooling1D(data_format='channels_first')(x)
    x = Dropout(0.1)(x)
    x = Dense(64, activation='relu')(x)
    x = Dropout(0.1)(x)
    out = Dense(1, activation='linear')(x)

    model = Model(inputs=in_seq, outputs=out)
    model.compile(loss='mse', optimizer='adam', metrics=['mae', 'mape'])
    return model
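

# Minimal usage sketch (not part of the original module): builds the model and
# runs a single training step on random data to show the expected input/output
# shapes. The array shapes follow the Input(shape=(seq_len, 5)) definition
# above; the sample count and epoch count are arbitrary placeholders.
if __name__ == '__main__':
    model = create_model()
    model.summary()

    # Random stand-in data: windows of seq_len steps with 5 features each,
    # and one regression target per window
    X = np.random.rand(batch_size * 4, seq_len, 5).astype('float32')
    y = np.random.rand(batch_size * 4, 1).astype('float32')
    model.fit(X, y, batch_size=batch_size, epochs=1)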