@@ -59,12 +59,11 @@ pure module subroutine forward(self, input)
     class(conv1d_layer), intent(in out) :: self
     real, intent(in) :: input(:,:)
     integer :: input_channels, input_width
-    integer :: j, n, a, b
-    integer :: iws, iwe, half_window
+    integer :: j, n
+    integer :: iws, iwe
 
     input_channels = size(input, dim=1)
     input_width = size(input, dim=2)
-    half_window = self % kernel_size / 2
 
     ! Loop over output positions.
     do j = 1, self % width
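
Note: the forward pass computes the window bounds iws and iwe directly from the output position j, so the centered half_window offset was dead code for this "valid" (no-padding), stride-1 convolution. Below is a minimal, standalone sketch of that indexing; the program name and sizes (kernel_size = 3, input_width = 8) are made up for illustration, not taken from the source.

program window_bounds_demo
  implicit none
  integer, parameter :: kernel_size = 3, input_width = 8
  integer :: j, iws, iwe, output_width

  ! A "valid" convolution produces input_width - kernel_size + 1 outputs.
  output_width = input_width - kernel_size + 1
  do j = 1, output_width
    iws = j                    ! window start
    iwe = j + kernel_size - 1  ! window end
    print '(a, i0, a, i0, a, i0, a)', 'j = ', j, '  window = [', iws, ':', iwe, ']'
  end do
end program window_bounds_demo
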
@@ -95,9 +94,8 @@ pure module subroutine backward(self, input, gradient)
     real, intent(in) :: gradient(:,:)
 
     integer :: input_channels, input_width, output_width
-    integer :: j, n, k, a, b, c
-    integer :: iws, iwe, half_window
-    real :: gdz_val
+    integer :: j, n, k
+    integer :: iws, iwe
 
     ! Local arrays to accumulate gradients.
     real :: gdz(self % filters, self % width)  ! local gradient (dL/dz)
@@ -109,8 +107,6 @@ pure module subroutine backward(self, input, gradient)
     input_width = size(input, dim=2)
     output_width = self % width  ! Note: output_width = input_width - kernel_size + 1
 
-    half_window = self % kernel_size / 2
-
     !-- Compute the local gradient gdz = (dL/dy) * sigma'(z) for each output.
     do j = 1, output_width
       gdz(:, j) = gradient(:, j) * self % activation % eval_prime(self % z(:, j))
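
Note: gdz holds the local gradient from the chain rule, dL/dz = (dL/dy) * sigma'(z), evaluated elementwise through eval_prime. Below is a minimal standalone sketch of just this step, assuming a sigmoid activation for concreteness (hypothetical; the layer itself dispatches through self % activation, and the values are made up).

program local_gradient_demo
  implicit none
  real :: z(3), dl_dy(3), gdz(3)

  z = [0.5, -1.0, 2.0]          ! pre-activations (made-up values)
  dl_dy = [0.1, 0.2, -0.3]      ! upstream gradient dL/dy (made-up values)
  gdz = dl_dy * sigma_prime(z)  ! chain rule: dL/dz = dL/dy * sigma'(z)
  print *, gdz

contains

  elemental real function sigma_prime(x)
    real, intent(in) :: x
    real :: s
    s = 1.0 / (1.0 + exp(-x))    ! sigma(x)
    sigma_prime = s * (1.0 - s)  ! sigma'(x) for the sigmoid
  end function sigma_prime

end program local_gradient_demo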