#layer_2
Explore tagged Tumblr posts
bydfi · 3 years ago
Text
#cryptocurrency #layer_2 #cryptocurrency_investment
Blockchain Technology: What Is Layer 1 Scaling Solution in Blockchain?
Photo by Clint Adair on Unsplash
Blockchain technology creates an avenue for humans to access a trustless and decentralized… http://dlvr.it/SMCGfB
0 notes
antonballdeluxe · 3 years ago
Text
commission from anonymous. commissions are still open, by the way!
2 notes
xlainx · 6 years ago
Audio
this whole mixtape is blowing my mind tbh
1 note
pattern-recognition · 4 years ago
Photo
serial experiments lain - Cyberia Layer_2
119 notes
infornographi · 6 years ago
Video
youtube
New album [ Cyberia Layer_2 ] will be released on July 14th, 2018 for SEL’s 20th anniversary. Featuring DJs and musicians Wasei “JJ” Chikada, Q’hey, Ko Kimura, Hideo Kobayashi, TaQ, Keisuke Onuki, & Watusi (from COLDFEET). Art by Yoshitoshi ABe.
66 notes
nakanotonio · 6 years ago
Link
yabai (roughly, “that's insane”)
0 notes
Text
Recursive Neural Networks
I am writing a neural network zoo to test models against one another. The most recent model is a recursive sort of network that has an MLP for each operator and an MLP for the global settings. A couple of hidden layers take the concatenated output of all of the networks and try to use that output to predict a patch. Lowest mean squared error score so far: 0.289!
################################################################
import sys
import os

sys.path.append(os.path.join(os.path.dirname(__file__), '../lazyloadingutils'))

import tensorflow as tf

from lazyloading import define_scope
# Other: 0 - 22
# OP1:  23 - 44
# OP2:  45 - 66
# OP3:  67 - 88
# OP4:  89 - 110
# OP5: 111 - 132
# OP6: 133 - 154
# What if the global params were predicted first, and then fed the histogram
# of feature data to help choose the operator? Write a new class. Also what if
# one network was just trained solely on global params, then that network's
# input was passed in with the features to a MLP or something.
class RecursiveMLP:
    def __init__(self, **kwargs):
        self.features = kwargs.get('features', None)
        self.labels = kwargs.get('labels', None)
        self.input_size = kwargs.get('input_size', None)
        self.parameters = kwargs.get('parameters', [2, 2])
        self.amount_layers = len(self.parameters)
        self.learning_rate = kwargs.get('learning_rate', 0.001)
        self.prob_keep_input = kwargs.get('prob_keep_input', None)
        self.prob_keep_hidden = kwargs.get('prob_keep_hidden', None)
        # Touch the lazily-defined properties so the graph is built up front.
        self.prediction
        self.optimise
        self.error
    @define_scope
    def prediction(self):
        def init_weights(shape, name):
            return tf.Variable(tf.random_normal(shape, stddev=0.01), name=name)

        number_inputs = int(self.features.get_shape()[1]) * int(self.features.get_shape()[2])
        x = tf.reshape(self.features, [-1, number_inputs])

        number_outputs_op = 22
        number_outputs_other = 23

        operators_outputs = []
        weights = []
        biases = []
        # One small MLP per operator (op 0-5) plus one for the global "other"
        # parameters (op 6); each gets its own stack of weights and biases.
        for op in range(7):
            if op < 6:
                amount_outputs = number_outputs_op
                name_prefix = "op_"
            else:
                amount_outputs = number_outputs_other
                name_prefix = "_other"
            weights_layer = []
            biases_layer = []
            weights_layer += [init_weights([number_inputs, self.parameters[0]], name_prefix + str(op) + "_weights_hidden_0")]
            biases_layer += [init_weights([self.parameters[0]], name_prefix + str(op) + "_biases_hidden_0")]
            for i, layer in enumerate(self.parameters):
                weights_name = name_prefix + str(op) + "_weights_hidden_" + str(i + 1)
                biases_name = name_prefix + str(op) + "_biases_hidden_" + str(i + 1)
                if i == (self.amount_layers - 1):
                    weights_layer += [init_weights([self.parameters[(self.amount_layers - 1)], amount_outputs], weights_name)]
                    biases_layer += [init_weights([amount_outputs], biases_name)]
                else:
                    weights_layer += [init_weights([self.parameters[i], self.parameters[i + 1]], weights_name)]
                    biases_layer += [init_weights([self.parameters[i + 1]], biases_name)]
            weights += [weights_layer]
            biases += [biases_layer]
            operators_outputs += [x]
            for i in range(len(weights[op])):
                # Hidden layers get dropout, an affine transform, and a ReLU;
                # the final layer of each sub-network is left linear.
                if i < (len(weights[op]) - 1):
                    with tf.name_scope(name_prefix + str(op) + "_Layer_" + str(i)):
                        # Input dropout on the first layer, hidden dropout afterwards.
                        prob = self.prob_keep_input if i == 0 else self.prob_keep_hidden
                        operators_outputs[op] = tf.nn.dropout(operators_outputs[op], prob)
                        operators_outputs[op] = tf.add(tf.matmul(operators_outputs[op], weights[op][i]), biases[op][i])
                        operators_outputs[op] = tf.nn.relu(operators_outputs[op])
                else:
                    with tf.name_scope(name_prefix + str(op) + "_Output"):
                        operators_outputs[op] = tf.nn.dropout(operators_outputs[op], self.prob_keep_hidden)
                        operators_outputs[op] = tf.add(tf.matmul(operators_outputs[op], weights[op][i]), biases[op][i])
                tf.summary.histogram(name_prefix + str(op) + "_weights_" + str(i) + "_summary", weights[op][i])

        # Concatenate the global params and the six operator outputs into one
        # 155-value predicted patch, matching the index ranges noted above.
        all_operators = tf.concat([o for o in operators_outputs[0:6]], 1)
        global_params = operators_outputs[6]
        predicted_patch = tf.concat([global_params, all_operators], 1)
        fully_1 = 70
        fully_2 = 70
        number_outputs = 155

        # A couple of fully connected layers take the concatenated 155-value
        # patch and predict the final patch from it.
        fully_connected_layer_weights = {
            'h1': tf.Variable(tf.random_normal([number_outputs, fully_1])),
            'h2': tf.Variable(tf.random_normal([fully_1, fully_2])),
            'out': tf.Variable(tf.random_normal([fully_2, number_outputs]))
        }
        fully_connected_layer_biases = {
            'b1': tf.Variable(tf.random_normal([fully_1])),
            'b2': tf.Variable(tf.random_normal([fully_2])),
            'out': tf.Variable(tf.random_normal([number_outputs]))
        }
        layer_1 = tf.add(tf.matmul(predicted_patch, fully_connected_layer_weights['h1']), fully_connected_layer_biases['b1'])
        layer_1 = tf.nn.relu(layer_1)
        layer_2 = tf.add(tf.matmul(layer_1, fully_connected_layer_weights['h2']), fully_connected_layer_biases['b2'])
        layer_2 = tf.nn.relu(layer_2)
        out_layer = tf.matmul(layer_2, fully_connected_layer_weights['out']) + fully_connected_layer_biases['out']
        return out_layer
    @define_scope
    def optimise(self):
        optimiser = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        return optimiser.minimize(self.error)
    @define_scope
    def error(self):
        # Root mean squared error between the target patch and the prediction.
        return tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.labels, self.prediction))))
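If you want to poke at it yourself, here's a minimal sketch of how the class could be wired up and trained. The feature and label shapes, layer sizes, keep probabilities, and the random stand-in batches are all illustrative assumptions (swap in whatever your data actually looks like), and it presumes define_scope is the usual lazy-property decorator that builds each op once inside its own scope.

import numpy as np
import tensorflow as tf

# Illustrative shapes: a 32 x 16 feature map per example and a
# 155-value patch target (23 global params + 6 operators x 22 params).
features = tf.placeholder(tf.float32, [None, 32, 16])
labels = tf.placeholder(tf.float32, [None, 155])

model = RecursiveMLP(features=features,
                     labels=labels,
                     parameters=[128, 64],
                     learning_rate=0.001,
                     prob_keep_input=0.9,
                     prob_keep_hidden=0.75)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(100):
        # Random stand-in batch; replace with real feature/patch pairs.
        batch_x = np.random.rand(16, 32, 16)
        batch_y = np.random.rand(16, 155)
        _, rmse = sess.run([model.optimise, model.error],
                           feed_dict={features: batch_x, labels: batch_y})
        if step % 10 == 0:
            print(step, rmse)

Because the constructor touches prediction, optimise, and error, the whole graph is in place by the time the session initialises the variables.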
0 notes
bydfi · 3 years ago
Text
Blockchain Technology: What Is Layer 1 Scaling Solution in Blockchain? http://dlvr.it/SMCBBN
0 notes
infornographi · 6 years ago
Video
youtube
New album [ Cyberia Layer_2 ] will be released on July 14th, 2018 for SEL’s 20th anniversary. Featuring DJs and musicians Wasei “JJ” Chikada, Q’hey, Ko Kimura, Hideo Kobayashi, TaQ, Keisuke Onuki, & Watusi (from COLDFEET). Art by Yoshitoshi ABe.
28 notes