-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathutils.py
63 lines (52 loc) · 2.3 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
from pybloom_live import ScalableBloomFilter
import requests
import tensorflow as tf
class D2Wrapper:
    """Thin client for the Steam Dota 2 Web API (IDOTA2Match_570 interface)."""

    def __init__(self, steam_api_key):
        # Key is injected into every request's query parameters.
        self.steam_api_key = steam_api_key

    def query_api(self, method, params):
        """Call `method` on the Dota 2 match API and return its 'result' payload.

        params: query parameters for the endpoint. The caller's dict is NOT
        mutated -- a copy is taken before the API key is injected (the
        original implementation wrote the key into the caller's dict).
        Returns {} when the JSON response has no 'result' key.
        Raises requests.HTTPError on a non-2xx response and
        requests.Timeout if the server does not answer within 30s.
        """
        # Copy so the secret key never leaks into the caller's dict.
        query = dict(params)
        query['key'] = self.steam_api_key
        url = "https://api.steampowered.com/IDOTA2Match_570/{}/V001/".format(method)
        # timeout= prevents hanging forever on an unresponsive server;
        # raise_for_status() surfaces HTTP errors instead of silently
        # turning an error page into an empty result.
        response = requests.get(url, params=query, timeout=30)
        response.raise_for_status()
        return response.json().get('result', {})

    def get_match_history(self, params):
        """Wrapper for the GetMatchHistory endpoint."""
        return self.query_api("GetMatchHistory", params)

    def get_match_details(self, params):
        """Wrapper for the GetMatchDetails endpoint."""
        return self.query_api("GetMatchDetails", params)
class CustomSBF(ScalableBloomFilter):
    """Scalable Bloom filter extended with bulk-insert and non-member filtering."""

    def add_many(self, items):
        """Insert every element of `items` into the filter."""
        for element in items:
            self.add(element)

    def get_non_members(self, items):
        """Return the elements of `items` that are not (probably) in the filter."""
        return [element for element in items if element not in self]
class NNModel:
    """Fully connected feed-forward network (TensorFlow 1.x graph API).

    `neurons` lists the layer widths, e.g. [784, 256, 10]: neurons[0] is the
    input dimension and neurons[-1] the output dimension.
    """

    def __init__(self, neurons, learning_rate=0.01, seed=128):
        # Placeholders for a batch of inputs and targets.
        self.x = tf.placeholder(tf.float32, [None, neurons[0]])
        self.y = tf.placeholder(tf.float32, [None, neurons[-1]])

        # One (weight, bias) pair per layer transition.
        # BUGFIX: the original loop was `range(1, len(neurons) - 1)`, which
        # skipped the input-layer transition -- the first weight matrix had
        # shape [neurons[1], neurons[2]], so matmul on self.x (width
        # neurons[0]) could never succeed unless neurons[0] == neurons[1].
        # Starting at 0 wires every requested layer correctly.
        weights = []
        biases = []
        for i in range(len(neurons) - 1):
            weights.append(tf.Variable(tf.random_normal([neurons[i], neurons[i + 1]], seed=seed)))
            biases.append(tf.Variable(tf.random_normal([neurons[i + 1]], seed=seed)))

        # Build the network: affine transform + ReLU at each layer.
        prev = self.x
        for w, b in zip(weights, biases):
            prev = tf.nn.relu(tf.add(tf.matmul(prev, w), b))

        # Clip activations away from 0 and 1 so the log terms below stay finite.
        self.output_layer = tf.clip_by_value(prev, 1e-10, 0.9999999)
        # Element-wise binary cross-entropy, summed over output units,
        # averaged over the batch.
        self.cost = -tf.reduce_mean(
            tf.reduce_sum(
                self.y * tf.log(self.output_layer)
                + (1 - self.y) * tf.log(1 - self.output_layer),
                axis=1,
            )
        )
        self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(self.cost)

        # Create the session and initialize all variables eagerly so fit()
        # can be called immediately after construction.
        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    def fit(self, X, Y):
        """Run a single gradient-descent step on the batch (X, Y)."""
        print("Training started")
        # Cached handle to the output tensor (kept from the original code;
        # presumably consumed by callers elsewhere -- TODO confirm).
        self.op_cache = tf.identity(self.output_layer)
        self.sess.run([self.optimizer, self.cost], feed_dict={self.x: X, self.y: Y})
        print("Training finished")

    def close(self):
        """Release the underlying TensorFlow session."""
        self.sess.close()