diff --git a/docs/assets/nodes/logic/neuro.png b/docs/assets/nodes/logic/neuro.png
new file mode 100644
index 0000000000000000000000000000000000000000..6f49699162959c69759f33c84e824b5d7eefc75b
Binary files /dev/null and b/docs/assets/nodes/logic/neuro.png differ
diff --git a/docs/assets/nodes/logic/neuro_ansumble.png b/docs/assets/nodes/logic/neuro_ansumble.png
new file mode 100644
index 0000000000000000000000000000000000000000..ad02ab1ad7b9e21bc66b6ab0e1d3ec691764a709
Binary files /dev/null and b/docs/assets/nodes/logic/neuro_ansumble.png differ
diff --git a/docs/assets/nodes/logic/neuro_data_in.png b/docs/assets/nodes/logic/neuro_data_in.png
new file mode 100644
index 0000000000000000000000000000000000000000..3f20bd11bf7d3a7b1030131fa0e5bb1ea5e3da99
Binary files /dev/null and b/docs/assets/nodes/logic/neuro_data_in.png differ
diff --git a/docs/assets/nodes/logic/neuro_data_in_text.png b/docs/assets/nodes/logic/neuro_data_in_text.png
new file mode 100644
index 0000000000000000000000000000000000000000..dcb1641879ee3ffa80c1837fe0ebebfa0f811950
Binary files /dev/null and b/docs/assets/nodes/logic/neuro_data_in_text.png differ
diff --git a/docs/assets/nodes/logic/neuro_etalon.png b/docs/assets/nodes/logic/neuro_etalon.png
new file mode 100644
index 0000000000000000000000000000000000000000..14127e1250b9a8456cd3085496829dc6662bb9b9
Binary files /dev/null and b/docs/assets/nodes/logic/neuro_etalon.png differ
diff --git a/docs/assets/nodes/logic/neuro_result.png b/docs/assets/nodes/logic/neuro_result.png
new file mode 100644
index 0000000000000000000000000000000000000000..713199abedb6fcc6647090791d35c8a492f45c2e
Binary files /dev/null and b/docs/assets/nodes/logic/neuro_result.png differ
diff --git a/docs/assets/nodes/logic/neuro_training_result.png b/docs/assets/nodes/logic/neuro_training_result.png
new file mode 100644
index 0000000000000000000000000000000000000000..bfad618c4a5ca05fe713689aee151875e6b798cb
Binary files /dev/null and b/docs/assets/nodes/logic/neuro_training_result.png differ
diff --git a/docs/nodes/logic/neuro_elman.rst b/docs/nodes/logic/neuro_elman.rst
index d5c2c857581414dcba70328b96de3abaa2b755fb..9266b5895a06470176dc812ae01e935d063d1ece 100644
--- a/docs/nodes/logic/neuro_elman.rst
+++ b/docs/nodes/logic/neuro_elman.rst
@@ -1,18 +1,128 @@
-Elman neuro node layer 1
-========================
-
- Neuro network node
- This node teachable. You may teach him rules, that he understand himself. Just put data and correct answer. When displace answer, he will find right answer himself.
- Input data. Inserting many objects - output many objects. Inserting one object with many parameters - output one object.
- Always insert constant numbers count of parameters, otherwise it will reset neuro data and start every time from beginning. Keep constant numbers count.
-
-- coef_learning - learning speed coeffitient, accuracy influence (less - more accuracy);
-- gisterezis - spread of input and etalon data;
-- maximum - maximum number input (better define little overhang number);
-- cycles - passes on one object;
-- A layer - input layer cores (and it is number of objects);
-- B layer - inner layer cores - more - smarter (overlearning is bad too);
-- C layer - output layer cores - numbers quantity in output;
-- epsilon - inner variable - argument offset in passes 'cycles' (not much influence totally);
-- lambda - holding coefficient, to preserve data flooding;
-- threshold - inner variable - defines reasonability limit in passes 'cycles' (not much influence totally).
+Neuro Elman 1 Layer
+===================
+
+.. image:: ../../assets/nodes/logic/neuro.png
+
+`Russian translation here <./neuro_elman_ru.md>`_
+
+Functionality
+-------------
+
+
+A single-layer Elman neural network with supervised learning, with inputs and outputs.
+Train the network on your data first; after training, use the node in your tree.
+
+`Algorithm description <https://kpfu.ru/staff_files/F1493580427/NejronGafGal.pdf>`_ (in Russian)
+
+Tune the node before use: set the mandatory properties first, the additional
+properties can be left at their defaults. After tuning and connecting the links,
+start training. The node learns on every update, so you can press update
+repeatedly, or run the animation, simply wait, and stop it when done.
+
+
+Data shape
+----------
+
+::
+
+ [obj1, obj2, obj3, ...]
+ obj = [...]
+ [[...], [...], [...], ...]
+
+The data is nested two levels deep: a list of objects, where every object holds
+the values for one training step.
+Prepare the data this way and link it to the *data* input. Send the same number
+of objects to the *etalon* input; the output will contain the same number of objects.
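+
+For example, three objects with two values each produce three output objects;
+a purely illustrative sketch of the shape:
+
+::
+
+ data   = [[x1, y1], [x2, y2], [x3, y3]]
+ etalon = [[d1], [d2], [d3]]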
+
+
+Category
+--------
+
+Logic -> Neuro Elman 1 Layer
+
+Inputs
+------
+
+* **data** - input data set
+* **etalon** - expected output data (used for training)
+
+
+Outputs
+-------
+
+* **result** - resulting data
+
+
+Parameters
+----------
+
++--------------------+--------+--------------------------------------------------------------------------------+
+| Parameter          | Type   | Description                                                                    |
++====================+========+================================================================================+
+| **A layer**        | int    | Neurons in the first (input) layer; must match the values count per object    |
++--------------------+--------+--------------------------------------------------------------------------------+
+| **B layer**        | int    | Neurons in the second (inner) layer; more neurons give more accurate results   |
++--------------------+--------+--------------------------------------------------------------------------------+
+| **C layer**        | int    | Neurons in the third (output) layer; must match the values count per object    |
++--------------------+--------+--------------------------------------------------------------------------------+
+| **maximum**        | float  | Maximum value expected in the input and output data                            |
++--------------------+--------+--------------------------------------------------------------------------------+
+| **koeff learning** | float  | Learning rate (safe to change)                                                 |
++--------------------+--------+--------------------------------------------------------------------------------+
+| **gisterezis**     | float  | Reserved for thresholding during signal processing (not used yet)              |
++--------------------+--------+--------------------------------------------------------------------------------+
+| **cycles**         | int    | Number of internal learning loops per update                                   |
++--------------------+--------+--------------------------------------------------------------------------------+
+| **epsilon**        | float  | Coefficient used in the learning assessment function                           |
++--------------------+--------+--------------------------------------------------------------------------------+
+| **lambda**         | float  | Step of the weight coefficient change during training                          |
++--------------------+--------+--------------------------------------------------------------------------------+
+| **treshold**       | float  | Threshold that prevents overtraining                                           |
++--------------------+--------+--------------------------------------------------------------------------------+
+| **Reset**          | Button | Reset all weight coefficients                                                  |
++--------------------+--------+--------------------------------------------------------------------------------+
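+
+Internally, each training pass moves every weight by a step scaled by
+**koeff learning** and damped by **lambda**, as implemented by ``delta_wji``
+and ``func_w`` in ``nodes/logic/neuro_elman.py``:
+
+::
+
+ dw = k_learning * sigma_j * y_i   # weight step (delta_wji)
+ w  = (1 - k_lambda) * w + dw      # damped update (func_w)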
+
+
+Usage
+-----
+
+**Task statement**
+~~~~~~~~~~~~~~~~~~
+
+Train the network on the XOR operation:
+
+::
+
+ [1, 1] = [1]
+ [1, 0] = [1]
+ [0, 1] = [1]
+ [0, 0] = [0]
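+
+In plain Python terms the training set is just two matching lists (the names
+``data`` and ``etalon`` mirror the node's sockets):
+
+::
+
+ data   = [[1, 1], [1, 0], [0, 1], [0, 0]]
+ etalon = [[1], [1], [1], [0]]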
+
+**Preparations**
+~~~~~~~~~~~~~~~~
+
+.. image:: ../../assets/nodes/logic/neuro_data_in.png
+.. image:: ../../assets/nodes/logic/neuro_data_in_text.png
+
+Prepare the expected data the same way:
+""""""""""""""""""""""""""""""""""""""""
+
+.. image:: ../../assets/nodes/logic/neuro_etalon.png
+
+**Node preparations**
+~~~~~~~~~~~~~~~~~~~~~
+
+* **A layer** - set to 2, because the inputs come in pairs
+* **B layer** - set to 5; any value works (experiment here)
+* **C layer** - set to 1, because the output has to be a single number
+
+.. image:: ../../assets/nodes/logic/neuro_ansumble.png
+
+Run the training and wait a while, then interrupt it. I got this result:
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+.. image:: ../../assets/nodes/logic/neuro_training_result.png
+
+Compare the result:
+"""""""""""""""""""
+
+.. image:: ../../assets/nodes/logic/neuro_result.png
diff --git a/docs/nodes/logic/neuro_elman_ru.md b/docs/nodes/logic/neuro_elman_ru.md
new file mode 100644
index 0000000000000000000000000000000000000000..396a26426a16e98d38f20f072146b49f42233f66
--- /dev/null
+++ b/docs/nodes/logic/neuro_elman_ru.md
@@ -0,0 +1,88 @@
+# Neuro Elman 1 Layer
+
+![](../../assets/nodes/logic/neuro.png)
+
+## Functionality
+
+A single-layer neural network with learning. It receives the input data together with the expected output.
+The network must be trained on a data set first; after training, the node can be used to compute results for a new set.
+
+[Algorithm description](https://kpfu.ru/staff_files/F1493580427/NejronGafGal.pdf)
+
+Tune the node before use. Set the mandatory parameters first; the additional parameters can be left alone unless needed.
+After tuning the node and connecting the links you can start training.
+The node learns on every update. Update with the button, or run the animation:
+the latter is convenient because you can simply wait until the node is trained and then stop the animation.
+
+
+## Data format
+
+    [obj1, obj2, obj3, ...]
+    obj = [...]
+    [[...], [...], [...], ...]
+
+I.e. a two-level list of objects, where every object contains the list of values for one training portion.
+So prepare the data in advance, pack it into objects, and feed it to the input. Feed the same number of
+objects to the *etalon* input to train the network. The output will contain the same number of objects
+with the results of the network.
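+
+For example, four training objects with two values each (an illustrative set,
+matching the XOR example below):
+
+```python
+data   = [[1, 1], [1, 0], [0, 1], [0, 0]]
+etalon = [[1], [1], [1], [0]]
+```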
+
+
+## Category
+
+Logic -> Neuro Elman 1 Layer
+
+## Inputs
+
+- data - incoming data set
+- etalon - expected data set
+
+
+## Outputs
+
+- result - result of the transformations
+
+
+## Parameters
+
+- A layer - neurons count of the first layer, equal to the number of input values per object
+- B layer - neurons count of the second (inner) layer
+- C layer - neurons count of the third layer, equal to the number of output values per object
+- maximum - the maximum values expected to be used on the node
+- koeff learning - learning rate of the node (safe to change)
+- gisterezis - reserved for threshold values during signal processing (not used yet)
+- cycles - number of learning cycles per pass
+- epsilon - susceptibility of the node learning
+- lambda - step of the weight coefficient change
+- treshold - threshold that prevents overtraining
+- Reset (button) - reset all weight coefficients
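+
+Each training pass moves every weight by a step scaled by **koeff learning**
+and damped by **lambda**, as implemented by `delta_wji` and `func_w` in
+`nodes/logic/neuro_elman.py`:
+
+```python
+dw = k_learning * sigma_j * y_i   # weight step (delta_wji)
+w  = (1 - k_lambda) * w + dw      # damped update (func_w)
+```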
+
+
+## Usage
+
+### Task statement
+
+Train the network on the XOR operation:
+
+    [1, 1] = [1]
+    [1, 0] = [1]
+    [0, 1] = [1]
+    [0, 0] = [0]
+
+### Preparing the data set
+
+![](../../assets/nodes/logic/neuro_data_in.png)
+![](../../assets/nodes/logic/neuro_data_in_text.png)
+
+Prepare the expected data the same way:
+
+![](../../assets/nodes/logic/neuro_etalon.png)
+
+### Node preparation
+
+- A layer - set to 2, because the input values come in pairs
+- B layer - set to 5, but it can be anything (experiment)
+- C layer - set to 1, because the output must be a single number
+
+![](../../assets/nodes/logic/neuro_ansumble.png)
+
+Run the animation and wait a minute, then stop it. I got this result:
+
+![](../../assets/nodes/logic/neuro_training_result.png)
+
+Let's check the training result:
+
+![](../../assets/nodes/logic/neuro_result.png)
diff --git a/nodes/logic/neuro_elman.py b/nodes/logic/neuro_elman.py
index 6378b8f8461179d14eee50944e0753c5ee91055e..6f9291bbdad5fc0fe2fabd0a9898e9d1b6d1941b 100644
--- a/nodes/logic/neuro_elman.py
+++ b/nodes/logic/neuro_elman.py
@@ -31,163 +31,173 @@ from copy import deepcopy
from cmath import exp
-class SvNeuro_Elman:
-
+class SvNeuroElman:
+ """ A set of functions for working with teachable neuron """
def init_w(self, number, ext, treshold):
out = []
- for n in range(number):
- tmp = []
- for e in range(ext):
- tmp.append(uniform(-treshold, treshold))
+ for _ in range(number):
+ tmp = [uniform(-treshold, treshold) for _ in range(ext)]
out.append(tmp)
return out
-
-    def sigmoida(self, x, a):
-        if a==0:
-            b=1
-        else:
-            b = 1/a
-        return 1/(1+exp(-b*x).real+1e-8)
-
-    def neuro(self, list_in, etalon, maxim, learning, prop):
-        outA = self.layerA(list_in, prop)
-        outB = self.layerB(outA, prop)
-        outC = self.layerC(outB, prop)
-
-        if learning:
-            lin = len(etalon)
-                if absdx <= prop['trashold'] or absdx > abs(maxim/2): break
-                list_x[idx] = xi
-
-            outB_ = self.layerB(list_x, prop)
-            outC_ = self.layerC(outB, prop)
-
-            prop['wA'] = list_wA
-            prop['wB'] = list_wB
-
+    def sigmoida(self, x, a=1):
+        # logistic activation, the slope is scaled by 1/a
+        b = 1 if a == 0 else 1 / a
+        return 1 / (1 + exp(-b * x).real + 1e-8)
+
+    def layer_a(self, list_in, prop):
+        # layer A passes the (bias-extended) input signals through
+        return list_in[:prop['InA']]
+
+    def layer_b(self, signals, prop):
+        # layer B: sigmoid of the weighted sums through the wA weights
+        return [self.sigmoida(sum(s * w[j] for s, w in zip(signals, prop['wA'])))
+                for j in range(prop['InB'])]
+
+    def layer_c(self, signals, prop):
+        # layer C: raw weighted sums through the wB weights
+        return [sum(s * w[k] for s, w in zip(signals, prop['wB']))
+                for k in range(prop['InC'])]
+
+    def neuro(self, list_in, etalon, maxim, is_learning, prop):
+        # forward pass, optionally followed by a learning pass
+        out_a = self.layer_a(list_in, prop)
+        out_b = self.layer_b(out_a, prop)
+        out_c = self.layer_c(out_b, prop)
+        if is_learning:
+            self.learning(out_a, out_b, out_c, etalon, maxim, prop)
+        return out_c
+
+    @staticmethod
+    def sigma(ej, f_vj):
+        # local gradient: error signal times activation derivative
+        return ej * f_vj
+
+    def f_vj_sigmoida(self, a, yj):
+        # derivative of the sigmoid output yj, the slope is scaled by 1/a
+        if a == 0:
+            b = 1
+        else:
+            b = 1 / a
+        return b * yj * (1 - yj)
+
+ @staticmethod
+ def func_ej_last(dj, yj):
+ return dj - yj
+
+ @staticmethod
+ def func_ej_inner(e_sigma_k, wkj):
+ return e_sigma_k * wkj
+
+ @staticmethod
+ def delta_wji(sigma_j, yi, prop):
+ return prop['k_learning'] * sigma_j * yi
+
+ @staticmethod
+ def func_w(w, dw, prop):
+ return (1 - prop['k_lambda']) * w + dw
+
+ def learning(self, out_a, out_b, out_c, etalon, maxim, prop):
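+        # Back-propagate the etalon error: adjust the B->C weights first,
+        # then the A->B weights, and finally nudge the input signals until
+        # the change drops below 'trashold' or leaves the working range.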
+ weights_a = deepcopy(prop['wA'])
+ weights_b = deepcopy(prop['wB'])
+ _out_a = deepcopy(out_a)
+ for idx, native_signal_a in enumerate(out_a):
+ processed_signal_a = deepcopy(native_signal_a)
+ _out_b = deepcopy(out_b)
+ _out_c = deepcopy(out_c)
+ for _ in range(prop['cycles']):
+ in_b = [0] * prop['InB']
+ in_a = [0] * prop['InA']
+ for idc, signal_c in enumerate(_out_c):
+ c_ = self.sigmoida(signal_c)
+ e_c = self.func_ej_last(etalon[idc], signal_c)
+ f_vc = self.f_vj_sigmoida(prop['InC'], c_)
+ sigma_c = self.sigma(e_c, f_vc)
+
+ for idb, signal_b in enumerate(_out_b):
+ dwji = self.delta_wji(sigma_c, signal_b, prop)
+ weights_b[idb][idc] = self.func_w(weights_b[idb][idc], dwji, prop)
+ in_b[idb] += sigma_c * dwji
+
+ for idb, signal_b in enumerate(_out_b):
+ f_vb = self.f_vj_sigmoida(prop['InB'], signal_b)
+ sigma_b = self.sigma(in_b[idb], f_vb)
+
+ for ida, signal_a in enumerate(out_a):
+ dwji = self.delta_wji(sigma_b, signal_a, prop)
+ weights_a[ida][idb] = self.func_w(weights_a[ida][idb], dwji, prop)
+ in_a[ida] += sigma_b * dwji
+
+ processed_signal_a -= prop['epsilon'] * processed_signal_a * (maxim - processed_signal_a)
+ absdx = abs(native_signal_a - processed_signal_a)
+ if absdx <= prop['trashold'] or absdx > abs(maxim / 2):
+ break
+ _out_a[idx] = processed_signal_a
+
+ _out_b = self.layer_b(_out_a, prop)
+            _out_c = self.layer_c(_out_b, prop)
+
+ prop['wA'] = weights_a
+ prop['wB'] = weights_b
class SvNeuroElman1LNode(bpy.types.Node, SverchCustomTreeNode, SvAnimatableNode):
- ''' Neuro Elman 1 Layer '''
+ '''
+ Neuro Elman 1 Layer
+    A teachable node: it receives data together with etalon examples to learn
+    from, and after training (for example while the animation runs) it can
+    output results without etalon values.
+    '''
+
bl_idname = 'SvNeuroElman1LNode'
bl_label = '*Neuro Elman 1 Layer'
bl_icon = 'OUTLINER_OB_EMPTY'
sv_icon = 'SV_NEURO'
- Elman = SvNeuro_Elman()
-
- k_learning: FloatProperty(name='k_learning', default=0.1, update=updateNode)
- gisterezis: FloatProperty(name='gisterezis', default=0.1, min=0.0, update=updateNode)
- maximum: FloatProperty(name='maximum', default=3.0, update=updateNode)
- menushka: BoolProperty(name='menushka', default=False)
- epsilon: FloatProperty(name='epsilon', default=1.0, update=updateNode)
- treshold: FloatProperty(name='treshold', default=0.01, update=updateNode)
- k_lambda: FloatProperty(name='k_lambda', default=0.001, max=0.1, update=updateNode)
- cycles: IntProperty(name='cycles', default=3, min = 1, update=updateNode)
- lA: IntProperty(name='lA', default=1, min = 0, update=updateNode)
- lB: IntProperty(name='lB', default=5, min = 0, update=updateNode)
- lC: IntProperty(name='lC', default=1, min = 0, update=updateNode)
+ elman = None
+
+ k_learning: FloatProperty(name='k_learning', default=0.1, update=updateNode, description="Learning rate")
+ gisterezis: FloatProperty(name='gisterezis', default=0.1, min=0.0, update=updateNode,
+ description="Sets the threshold of values inside the learning algorithm (in plans)")
+ maximum: FloatProperty(name='maximum', default=3.0, update=updateNode,
+ description="The maximum value of the input and output layer")
+ menushka: BoolProperty(name='menushka', default=False, description="Extra options")
+ epsilon: FloatProperty(name='epsilon', default=1.0, update=updateNode,
+ description="The coefficient participates in the learning assessment function")
+ treshold: FloatProperty(name='treshold', default=0.01, update=updateNode,
+ description="Participates in learning assessment")
+ k_lambda: FloatProperty(name='k_lambda', default=0.0001, max=0.1, update=updateNode,
+ description="Weight change step during training")
+ cycles: IntProperty(name='cycles', default=3, min=1, update=updateNode, description="Internal Learning Loops")
+ lA: IntProperty(name='lA', default=1, min=0, update=updateNode,
+ description="Input layer (must match the number of elements in the input)")
+ lB: IntProperty(name='lB', default=5, min=0, update=updateNode,
+ description="Inner layer (more nodes - more accurate calculations)")
+ lC: IntProperty(name='lC', default=1, min=0, update=updateNode,
+ description="Output layer (must match the number of elements in the output)")
def sv_init(self, context):
self.inputs.new('SvStringsSocket', "data")
@@ -197,22 +207,24 @@ class SvNeuroElman1LNode(bpy.types.Node, SverchCustomTreeNode, SvAnimatableNode)
def draw_buttons(self, context, layout):
self.draw_animatable_buttons(layout, icon_only=True)
handle_name = self.name + self.id_data.name
- layout.prop(self, "k_learning", text="koeff learning")
- layout.prop(self, "gisterezis", text="gisterezis")
+
+ col_top = layout.column(align=True)
+ row = col_top.row(align=True)
+ row.prop(self, "lA", text="A layer")
+ row = col_top.row(align=True)
+ row.prop(self, "lB", text="B layer")
+ row = col_top.row(align=True)
+ row.prop(self, "lC", text="C layer")
+
layout.prop(self, "maximum", text="maximum")
- layout.prop(self, "cycles", text="cycles")
- op_start = layout.operator('node.sverchok_neuro', text='Restart')
- op_start.typ=1
+ op_start = layout.operator('node.sverchok_neuro', text='Reset')
+ op_start.typ = 1
op_start.handle_name = handle_name
layout.prop(self, "menushka", text="extend sets:")
if self.menushka:
- col_top = layout.column(align=True)
- row = col_top.row(align=True)
- row.prop(self, "lA", text="A layer")
- row = col_top.row(align=True)
- row.prop(self, "lB", text="B layer")
- row = col_top.row(align=True)
- row.prop(self, "lC", text="C layer")
+ layout.prop(self, "k_learning", text="koeff learning")
+ layout.prop(self, "gisterezis", text="gisterezis")
+ layout.prop(self, "cycles", text="cycles")
col = layout.column(align=True)
col.prop(self, "epsilon", text="epsilon")
col = layout.column(align=True)
@@ -220,47 +232,43 @@ class SvNeuroElman1LNode(bpy.types.Node, SverchCustomTreeNode, SvAnimatableNode)
col = layout.column(align=True)
col.prop(self, "treshold", text="treshold")
-
def process(self):
handle_name = self.name + self.id_data.name
handle = handle_read(handle_name)
props = handle[1]
if not handle[0]:
- props = {'InA':2,
- 'InB':5,
- 'InC':1,
- 'wA':[],
- 'wB':[],
- 'gister':0.01,
- 'k_learning':0.1,
- 'epsilon':1.3,
- 'cycles':3,
- 'trashold':0.01,
- 'k_lambda':0.0001}
-
- props['wA'] = self.Elman.init_w(props['InA'], props['InB'], props['trashold'])
- props['wB'] = self.Elman.init_w(props['InB'], props['InC'], props['trashold'])
-
-
- self.Elman.gister = abs(self.gisterezis)
- self.Elman.k_learning = self.k_learning
-
+ elman = SvNeuroElman()
+ props = {'InA': 2,
+ 'InB': 5,
+ 'InC': 1,
+ 'wA': [],
+ 'wB': [],
+ 'gister': 0.01,
+ 'k_learning': 0.1,
+ 'epsilon': 1.3,
+ 'cycles': 3,
+ 'trashold': 0.01,
+ 'k_lambda': 0.0001,
+ 'Elman': elman,
+ }
+
+ self.elman = props['Elman']
result = []
if self.outputs['result'].is_linked and self.inputs['data'].is_linked:
if self.inputs['etalon'].is_linked:
- etalon = self.inputs['etalon'].sv_get()[0]
- flag = True
+ input_etalon = self.inputs['etalon'].sv_get()
+ is_learning = True
else:
- flag = False
- etalon = [[0]]
+ input_etalon = [[0]]
+ is_learning = False
- if (props['InA']!=self.lA+1) or props['InB']!=self.lB or props['InC']!=self.lC:
- props['InA'] = self.lA+1
+ if (props['InA'] != self.lA + 1) or props['InB'] != self.lB or props['InC'] != self.lC:
+ props['InA'] = self.lA + 1
props['InB'] = self.lB
props['InC'] = self.lC
- props['wA'] = self.Elman.init_w(props['InA'], props['InB'])
- props['wB'] = self.Elman.init_w(props['InB'], props['InC'])
+ props['wA'] = self.elman.init_w(props['InA'], props['InB'], props['trashold'])
+ props['wB'] = self.elman.init_w(props['InB'], props['InC'], props['trashold'])
props['gister'] = self.gisterezis
props['k_learning'] = self.k_learning
@@ -269,29 +277,32 @@ class SvNeuroElman1LNode(bpy.types.Node, SverchCustomTreeNode, SvAnimatableNode)
props['cycles'] = self.cycles
props['trashold'] = self.treshold
- data_ = self.inputs['data'].sv_get()[0]
- if type(etalon[0]) not in [list, tuple]: etalon = [etalon]
- if type(data_[0]) not in [list, tuple]: data_ = [data_]
- for idx, data in enumerate(data_):
- let = len(etalon)-1
- eta = etalon[min(idx,let)]
- data2 = [1.0]+data
- if type(eta) not in [list, tuple]: eta = [eta]
- result.append([self.Elman.neuro(data2, eta, self.maximum, flag, props)])
+ input_data = self.inputs['data'].sv_get()
+
+ if type(input_etalon[0]) not in [list, tuple]:
+ input_etalon = [input_etalon]
+ if type(input_data[0]) not in [list, tuple]:
+ input_data = [input_data]
+
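+            # Feed objects one by one; if fewer etalon objects are supplied,
+            # the last etalon is reused for the remaining data objects.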
+ for idx, data in enumerate(input_data):
+ let = len(input_etalon) - 1
+ eta = input_etalon[min(idx, let)]
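+                # Prepend the constant bias input; this is why InA = lA + 1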
+ data2 = [1.0] + data
+ if type(eta) not in [list, tuple]:
+ eta = [eta]
+ result.append(self.elman.neuro(data2, eta, self.maximum, is_learning, props))
else:
- result = [[[]]]
+ result = [[]]
handle_write(handle_name, props)
self.outputs['result'].sv_set(result)
-
-
-#*********************************
+# *********************************
class SvNeuroOps(bpy.types.Operator):
- """ Neuro operators """
+ """ Resetting weights """
bl_idname = "node.sverchok_neuro"
bl_label = "Sverchok Neuro operators"
bl_options = {'REGISTER', 'UNDO'}
@@ -303,10 +314,10 @@ class SvNeuroOps(bpy.types.Operator):
if self.typ == 1:
handle = handle_read(self.handle_name)
prop = handle[1]
- Elman = SvNeuro_Elman()
if handle[0]:
- prop['wA']=Elman.init_w(prop['InA'], prop['InB'], prop['trashold'])
- prop['wB']=Elman.init_w(prop['InB'], prop['InC'], prop['trashold'])
+ elman = prop['Elman']
+ prop['wA'] = elman.init_w(prop['InA'], prop['InB'], prop['trashold'])
+ prop['wB'] = elman.init_w(prop['InB'], prop['InC'], prop['trashold'])
handle_write(self.handle_name, prop)
return {'FINISHED'}