zah-zah
2013-02-02, 13:42
أرجو المساعدة في معرفة تأثير تغير عدد الطبقات وعدد العصبونات ونوع تابع التفعيل وتغير معدل التعليم على الشبكة التالية وشكرا
% Initial weights: w1 maps 2 inputs -> 3 hidden units, w2 maps 3 -> 3,
% w3 maps 3 hidden units -> 2 linear outputs.
w1 = [0.52 0.41 0.12; 0.22 0.61 0.21];
w2 = [0.12 0.52 0.31; 0.22 0.42 0.32; 0.24 0.18 0.62];
w3 = [0.31 0.32; 0.32 0.24; 0.42 0.31];
% Training set: 10 samples (one per row), 2 features each.
X = [0.04 0.03; 0.09 0.12; 0.13 0.21; 0.16 0.23; 0.20 0.25; ...
     0.75 0.75; 0.81 0.83; 0.85 0.84; 0.91 0.89; 0.95 0.92];
% Desired outputs: first 5 samples -> [1;0], last 5 -> [0;1].
d = [1 1 1 1 1 0 0 0 0 0; 0 0 0 0 0 1 1 1 1 1];
% Learning rate.
gama = 0.36;
% Training loop: 10000 online backpropagation updates on random samples.
% Semicolons added so 10000 iterations do not flood the console, and the
% unused bias updates (d_b1/d_b2/d_b3 were computed but never applied)
% are removed.
for k = 1:10000
    % Pick a random training sample (row index in 1..size(X,1)).
    k1 = floor(size(X,1)*rand + 1);
    x = X(k1,:);                    % 1x2 input row
    % Feed forward: two sigmoid hidden layers, linear output layer.
    s1 = x*w1;
    y1 = 1./(1 + exp(-s1));         % 1x3 hidden layer 1
    s2 = y1*w2;
    y2 = 1./(1 + exp(-s2));         % 1x3 hidden layer 2
    so = y2*w3;                     % 1x2 linear output
    % Error against the desired output column for this sample.
    do = d(:,k1);
    e = do - so';
    % Backpropagation. Output neurons are linear, so delta_o = e.
    delta_o  = e;
    delta_h  = w3*delta_o  .* (y2'.*(1 - y2'));  % hidden layer 2 delta
    delta_h2 = w2*delta_h .* (y1'.*(1 - y1'));   % hidden layer 1 delta
    % Weight updates. BUG FIX: d_w2 must use delta_h (the delta of the
    % layer that w2 feeds), not delta_h2 as the original code did.
    d_w1 = gama*x'*delta_h2';
    d_w2 = gama*y1'*delta_h';
    d_w3 = gama*y2'*delta_o';
    w1 = w1 + d_w1;
    w2 = w2 + d_w2;
    w3 = w3 + d_w3;
end
% Test: run every training sample through the trained network, echo the
% sample and its output, and collect all outputs so they can be plotted.
% (The original plot(x,yo) showed only the LAST sample, since x and yo
% were overwritten on every iteration.)
n = size(X,1);
Yo = zeros(n,2);            % one row of network outputs per sample
for k1 = 1:n
    x = X(k1,:);
    s1 = x*w1;
    y1 = 1./(1 + exp(-s1));
    s2 = y1*w2;
    y2 = 1./(1 + exp(-s2));
    so = y2*w3;             % linear output layer
    yo = so;
    Yo(k1,:) = yo;
    x                        % display the input sample
    yo                       % and the network's response
end
% Plot both output neurons across ALL samples, not just the last one.
plot(1:n, Yo(:,1), '-o', 1:n, Yo(:,2), '-x')
xlabel('sample index'); ylabel('network output');
legend('output 1', 'output 2');
k   % number of training iterations performed
% Reset the weights so the next training run starts from scratch, and
% ask the user for a 2-D point to classify after retraining.
w1 = [0.52 0.41 0.12; 0.22 0.61 0.21];
w2 = [0.12 0.52 0.31; 0.22 0.42 0.32; 0.24 0.18 0.62];
w3 = [0.31 0.32; 0.32 0.24; 0.42 0.31];
x1 = input('Please Enter Point x1:');
x2 = input('Please Enter Point x2:');
i = [x1 x2];     % query point to classify
gama = 0.36;     % learning rate
% Training loop: 10000 online backpropagation updates on random samples.
% Semicolons added so 10000 iterations do not flood the console, and the
% unused bias updates (d_b1/d_b2/d_b3 were computed but never applied)
% are removed.
for k = 1:10000
    % Pick a random training sample (row index in 1..size(X,1)).
    k1 = floor(size(X,1)*rand + 1);
    x = X(k1,:);                    % 1x2 input row
    % Feed forward: two sigmoid hidden layers, linear output layer.
    s1 = x*w1;
    y1 = 1./(1 + exp(-s1));         % 1x3 hidden layer 1
    s2 = y1*w2;
    y2 = 1./(1 + exp(-s2));         % 1x3 hidden layer 2
    so = y2*w3;                     % 1x2 linear output
    % Error against the desired output column for this sample.
    do = d(:,k1);
    e = do - so';
    % Backpropagation. Output neurons are linear, so delta_o = e.
    delta_o  = e;
    delta_h  = w3*delta_o  .* (y2'.*(1 - y2'));  % hidden layer 2 delta
    delta_h2 = w2*delta_h .* (y1'.*(1 - y1'));   % hidden layer 1 delta
    % Weight updates. BUG FIX: d_w2 must use delta_h (the delta of the
    % layer that w2 feeds), not delta_h2 as the original code did.
    d_w1 = gama*x'*delta_h2';
    d_w2 = gama*y1'*delta_h';
    d_w3 = gama*y2'*delta_o';
    w1 = w1 + d_w1;
    w2 = w2 + d_w2;
    w3 = w3 + d_w3;
end
% Test pass: echo every training sample together with the trained
% network's response to it.
for k1 = 1:size(X,1)
    x = X(k1,:);
    a1 = 1./(1 + exp(-x*w1));    % first sigmoid hidden layer
    a2 = 1./(1 + exp(-a1*w2));   % second sigmoid hidden layer
    yo = a2*w3;                  % linear output neurons
    x       % display the input sample
    yo      % and the corresponding network output
end
% Classify the user-supplied point i with the trained network and show
% the result. (The trailing bare `e` from the original was removed: it
% displayed the error of the LAST random training sample, which has
% nothing to do with the query point and was misleading.)
s1 = i*w1;
y1 = 1./(1 + exp(-s1));    % hidden layer 1
s2 = y1*w2;
y2 = 1./(1 + exp(-s2));    % hidden layer 2
so = y2*w3;                % linear output layer
yo = so;
i    % the query point entered by the user
yo   % its network output (class scores)
% Initial weights: w1 maps 2 inputs -> 3 hidden units, w2 maps 3 -> 3,
% w3 maps 3 hidden units -> 2 linear outputs.
w1 = [0.52 0.41 0.12; 0.22 0.61 0.21];
w2 = [0.12 0.52 0.31; 0.22 0.42 0.32; 0.24 0.18 0.62];
w3 = [0.31 0.32; 0.32 0.24; 0.42 0.31];
% Training set: 10 samples (one per row), 2 features each.
X = [0.04 0.03; 0.09 0.12; 0.13 0.21; 0.16 0.23; 0.20 0.25; ...
     0.75 0.75; 0.81 0.83; 0.85 0.84; 0.91 0.89; 0.95 0.92];
% Desired outputs: first 5 samples -> [1;0], last 5 -> [0;1].
d = [1 1 1 1 1 0 0 0 0 0; 0 0 0 0 0 1 1 1 1 1];
% Learning rate.
gama = 0.36;
% Training loop: 10000 online backpropagation updates on random samples.
% Semicolons added so 10000 iterations do not flood the console, and the
% unused bias updates (d_b1/d_b2/d_b3 were computed but never applied)
% are removed.
for k = 1:10000
    % Pick a random training sample (row index in 1..size(X,1)).
    k1 = floor(size(X,1)*rand + 1);
    x = X(k1,:);                    % 1x2 input row
    % Feed forward: two sigmoid hidden layers, linear output layer.
    s1 = x*w1;
    y1 = 1./(1 + exp(-s1));         % 1x3 hidden layer 1
    s2 = y1*w2;
    y2 = 1./(1 + exp(-s2));         % 1x3 hidden layer 2
    so = y2*w3;                     % 1x2 linear output
    % Error against the desired output column for this sample.
    do = d(:,k1);
    e = do - so';
    % Backpropagation. Output neurons are linear, so delta_o = e.
    delta_o  = e;
    delta_h  = w3*delta_o  .* (y2'.*(1 - y2'));  % hidden layer 2 delta
    delta_h2 = w2*delta_h .* (y1'.*(1 - y1'));   % hidden layer 1 delta
    % Weight updates. BUG FIX: d_w2 must use delta_h (the delta of the
    % layer that w2 feeds), not delta_h2 as the original code did.
    d_w1 = gama*x'*delta_h2';
    d_w2 = gama*y1'*delta_h';
    d_w3 = gama*y2'*delta_o';
    w1 = w1 + d_w1;
    w2 = w2 + d_w2;
    w3 = w3 + d_w3;
end
% Test: run every training sample through the trained network, echo the
% sample and its output, and collect all outputs so they can be plotted.
% (The original plot(x,yo) showed only the LAST sample, since x and yo
% were overwritten on every iteration.)
n = size(X,1);
Yo = zeros(n,2);            % one row of network outputs per sample
for k1 = 1:n
    x = X(k1,:);
    s1 = x*w1;
    y1 = 1./(1 + exp(-s1));
    s2 = y1*w2;
    y2 = 1./(1 + exp(-s2));
    so = y2*w3;             % linear output layer
    yo = so;
    Yo(k1,:) = yo;
    x                        % display the input sample
    yo                       % and the network's response
end
% Plot both output neurons across ALL samples, not just the last one.
plot(1:n, Yo(:,1), '-o', 1:n, Yo(:,2), '-x')
xlabel('sample index'); ylabel('network output');
legend('output 1', 'output 2');
k   % number of training iterations performed
% Reset the weights so the next training run starts from scratch, and
% ask the user for a 2-D point to classify after retraining.
w1 = [0.52 0.41 0.12; 0.22 0.61 0.21];
w2 = [0.12 0.52 0.31; 0.22 0.42 0.32; 0.24 0.18 0.62];
w3 = [0.31 0.32; 0.32 0.24; 0.42 0.31];
x1 = input('Please Enter Point x1:');
x2 = input('Please Enter Point x2:');
i = [x1 x2];     % query point to classify
gama = 0.36;     % learning rate
% Training loop: 10000 online backpropagation updates on random samples.
% Semicolons added so 10000 iterations do not flood the console, and the
% unused bias updates (d_b1/d_b2/d_b3 were computed but never applied)
% are removed.
for k = 1:10000
    % Pick a random training sample (row index in 1..size(X,1)).
    k1 = floor(size(X,1)*rand + 1);
    x = X(k1,:);                    % 1x2 input row
    % Feed forward: two sigmoid hidden layers, linear output layer.
    s1 = x*w1;
    y1 = 1./(1 + exp(-s1));         % 1x3 hidden layer 1
    s2 = y1*w2;
    y2 = 1./(1 + exp(-s2));         % 1x3 hidden layer 2
    so = y2*w3;                     % 1x2 linear output
    % Error against the desired output column for this sample.
    do = d(:,k1);
    e = do - so';
    % Backpropagation. Output neurons are linear, so delta_o = e.
    delta_o  = e;
    delta_h  = w3*delta_o  .* (y2'.*(1 - y2'));  % hidden layer 2 delta
    delta_h2 = w2*delta_h .* (y1'.*(1 - y1'));   % hidden layer 1 delta
    % Weight updates. BUG FIX: d_w2 must use delta_h (the delta of the
    % layer that w2 feeds), not delta_h2 as the original code did.
    d_w1 = gama*x'*delta_h2';
    d_w2 = gama*y1'*delta_h';
    d_w3 = gama*y2'*delta_o';
    w1 = w1 + d_w1;
    w2 = w2 + d_w2;
    w3 = w3 + d_w3;
end
% Test pass: echo every training sample together with the trained
% network's response to it.
for k1 = 1:size(X,1)
    x = X(k1,:);
    a1 = 1./(1 + exp(-x*w1));    % first sigmoid hidden layer
    a2 = 1./(1 + exp(-a1*w2));   % second sigmoid hidden layer
    yo = a2*w3;                  % linear output neurons
    x       % display the input sample
    yo      % and the corresponding network output
end
% Classify the user-supplied point i with the trained network and show
% the result. (The trailing bare `e` from the original was removed: it
% displayed the error of the LAST random training sample, which has
% nothing to do with the query point and was misleading.)
s1 = i*w1;
y1 = 1./(1 + exp(-s1));    % hidden layer 1
s2 = y1*w2;
y2 = 1./(1 + exp(-s2));    % hidden layer 2
so = y2*w3;                % linear output layer
yo = so;
i    % the query point entered by the user
yo   % its network output (class scores)