|
|
|
|
|
|
op="Flatten", |
|
|
|
input=[ |
|
|
|
inputs[-1] |
|
|
|
], # take only the last input, assume all other arguments are trivial (like sequence_length==1 always in ML-agents LSTM nets) |
|
|
|
], # take only the last input, assume all other arguments are trivial (like sequence_length==1 |
|
|
|
# always in ML-agents LSTM nets) |
|
|
|
), |
|
|
|
"Reshape": lambda nodes, inputs, tensors, context: Struct( |
|
|
|
op="Reshape", |
|
|
|
|
|
|
input=[i for i in inputs] |
|
|
|
+ [t.name for t in tensors][1:][ |
|
|
|
-2: |
|
|
|
], # [1:] - skips the 0th tensor, since Conv2DBackpropInput 0th tensor is 'input_sizes' (which differs from other Conv layers) |
|
|
|
], # [1:] - skips the 0th tensor, since Conv2DBackpropInput 0th tensor is 'input_sizes' |
|
|
|
# (which differs from other Conv layers) |
|
|
|
# [-2:] - take only last 2 tensors, this allows to process large patterns with the same code |
|
|
|
padding=get_attr(by_op(nodes, "Conv2DBackpropInput"), "padding"), |
|
|
|
strides=get_attr(by_op(nodes, "Conv2DBackpropInput"), "strides"), |
|
|
|
|
|
|
# TODO:'Round' |
|
|
|
# TODO:'Rsqrt' |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
# Debug |
|
|
|
def debug(s): |
|
|
|
|
|
|
end = end.astype(np.int32).tolist() |
|
|
|
strides = strides.astype(np.int32).tolist() |
|
|
|
|
|
|
|
# StridedSlice range and mask descriptions: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/strided-slice |
|
|
|
# StridedSlice range and mask descriptions: |
|
|
|
# https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/strided-slice |
|
|
|
# TODO: I don't think ellipsis and newaxis would work together well with current implementation |
|
|
|
|
|
|
|
assert len(begin) == len(end) |
|
|
|
|
|
|
else: |
|
|
|
activation = "Linear" |
|
|
|
|
|
|
|
if not class_name in known_classes: |
|
|
|
if class_name not in known_classes: |
|
|
|
if class_name in requires_runtime_flag: |
|
|
|
print("SKIP:", class_name, "layer is used only for training") |
|
|
|
else: |
|
|
|
|
|
|
auto_pad = get_attr(layer, "padding") # layer.attr['padding'].s.decode("utf-8") |
|
|
|
pads = get_attr(layer, "pads") |
|
|
|
strides = get_attr(layer, "strides") # layer.attr['strides'].list.i |
|
|
|
dilations = get_attr(layer, "dilations") # layer.attr['dilations'].list.i |
|
|
|
pool_size = get_attr(layer, "ksize") # layer.attr['ksize'].list.i |
|
|
|
shape = get_attr(layer, "shape") |
|
|
|
starts = get_attr(layer, "starts") |
|
|
|
|
|
|
alpha = get_attr(layer, "alpha", default=1) |
|
|
|
beta = get_attr(layer, "beta") |
|
|
|
|
|
|
|
if activation and not activation in known_activations: |
|
|
|
if activation and activation not in known_activations: |
|
|
|
if auto_pad and not auto_pad in known_paddings: |
|
|
|
if auto_pad and auto_pad not in known_paddings: |
|
|
|
if data_frmt and not data_frmt in supported_data_formats: |
|
|
|
if data_frmt and data_frmt not in supported_data_formats: |
|
|
|
print("UNSUPPORTED: data format", data_frmt) |
|
|
|
|
|
|
|
o_l.activation = known_activations.get(activation) or 0 |
|
|
|
|
|
|
-1 not in input_ranks |
|
|
|
) # for rank() lambda all input ranks have to be known (not -1) |
|
|
|
rank = rank(input_ranks) |
|
|
|
if rank == None: |
|
|
|
if rank is None: |
|
|
|
|
|
|
|
def all_elements_equal(arr):  # http://stackoverflow.com/q/3844948/
    """Return True if every element of *arr* compares equal to the first.

    An empty sequence is considered trivially "all equal" (the original
    `arr.count(arr[0])` form raised IndexError on empty input).  Works for
    any iterable sequence, including those without a `.count` method.
    """
    it = iter(arr)
    first = next(it, None)
    return all(x == first for x in it)
|
|
|
|
|
|
# filter only inputs that are coming from nodes that are outside this pattern |
|
|
|
# preserve the order |
|
|
|
pattern_nodes = [n.name for n in pattern_nodes] + tensor_names |
|
|
|
# inputs_from_outside_pattern = remove_duplicates_from_list([i for i in inputs_to_op_nodes if nodes_by_name[i] not in pattern_nodes]) |
|
|
|
# inputs_from_outside_pattern = remove_duplicates_from_list([i for i in inputs_to_op_nodes if |
|
|
|
# nodes_by_name[i] not in pattern_nodes]) |
|
|
|
inputs_from_outside_pattern = remove_duplicates_from_list( |
|
|
|
[i for i in inputs_to_op_nodes if i not in pattern_nodes] |
|
|
|
) |
|
|
|
|
|
|
Converts a TensorFlow model into a Barracuda model. |
|
|
|
:param source_file: The TensorFlow Model |
|
|
|
:param target_file: The name of the file the converted model will be saved to |
|
|
|
:param trim_unused_by_output: The regexp to match output nodes to remain in the model. All other unconnected nodes will be removed. |
|
|
|
:param trim_unused_by_output: The regexp to match output nodes to remain in the model. |
|
|
|
All other unconnected nodes will be removed. |
|
|
|
:param verbose: If True, will display debug messages |
|
|
|
:param compress_f16: If true, the float values will be converted to f16 |
|
|
|
:return: |
|
|
|
|
|
|
o_model.layers = cleanup_layers(o_model.layers) |
|
|
|
|
|
|
|
all_inputs = {i for l in o_model.layers for i in l.inputs} |
|
|
|
embedded_tensors = {t.name for l in o_model.layers for t in l.tensors} |
|
|
|
|
|
|
|
# Trim |
|
|
|
if trim_unused_by_output: |
|
|
|