import torch

import _ext.cunnex # compiled CUDA extension providing the actual kernels

# applies per-pixel separable filters to the input; the forward pass is
# delegated to the CUDA kernel exposed by _ext.cunnex
class SeparableConvolution(torch.autograd.Function):
	def __init__(self):
		super(SeparableConvolution, self).__init__()
	# end

	def forward(self, input1, input2, input3):
		# input1 is the image batch; input2 and input3 carry, for every output
		# pixel, the taps of the two 1-D filters whose outer product forms the
		# per-pixel 2-D kernel of the separable convolution
		intBatches = input1.size(0)
		intInputDepth = input1.size(1)
		intInputHeight = input1.size(2)
		intInputWidth = input1.size(3)
		intFilterSize = min(input2.size(1), input3.size(1))
		intOutputHeight = min(input2.size(2), input3.size(2))
		intOutputWidth = min(input2.size(3), input3.size(3))

		# the filters are fixed at 51 taps; a valid convolution therefore
		# shrinks each spatial dimension by intFilterSize - 1 = 50 pixels
		assert intInputHeight - 51 == intOutputHeight - 1
		assert intInputWidth - 51 == intOutputWidth - 1
		assert intFilterSize == 51

		# the CUDA kernel indexes raw memory and requires contiguous tensors
		assert input1.is_contiguous()
		assert input2.is_contiguous()
		assert input3.is_contiguous()

		# allocate the output on the same device and with the same type as the input
		output = input1.new().resize_(intBatches, intInputDepth, intOutputHeight, intOutputWidth).zero_()

		if input1.is_cuda:
			_ext.cunnex.SeparableConvolution_cuda_forward(
				input1,
				input2,
				input3,
				output
			)

		else:
			raise NotImplementedError() # CPU VERSION NOT IMPLEMENTED

		# end

		return output
	# end

	def backward(self, gradOutput):
		raise NotImplementedError() # BACKPROPAGATION NOT IMPLEMENTED
	# end
# end
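
# A minimal pure-PyTorch sketch of what the CUDA forward above computes, for
# reference only; it is not part of the original file, requires a newer
# PyTorch than this code targets (torch.nn.functional.unfold), and is far
# slower than the kernel. It assumes, following the argument order above,
# that input2 holds the per-pixel vertical (row) taps and input3 the
# per-pixel horizontal (column) taps, so that for every output location:
#
#	output[b, c, y, x] = sum over (i, j) of
#		input1[b, c, y + i, x + j] * input2[b, i, y, x] * input3[b, j, y, x]
def separable_convolution_reference(input1, input2, input3):
	intFilterSize = input2.size(1)
	intOutputHeight = input2.size(2)
	intOutputWidth = input2.size(3)

	# gather every intFilterSize x intFilterSize patch of the input;
	# unfold returns (batches, depth * size * size, height * width)
	patches = torch.nn.functional.unfold(input1, kernel_size=intFilterSize)
	patches = patches.view(input1.size(0), input1.size(1), intFilterSize, intFilterSize, intOutputHeight, intOutputWidth)

	# the outer product of the two 1-D filters yields the per-pixel 2-D filter
	weights = input2.unsqueeze(2) * input3.unsqueeze(1) # (batches, size, size, height, width)

	# weight each patch and reduce over the two filter-tap dimensions
	return (patches * weights.unsqueeze(1)).sum(2).sum(2)
# end

# Hypothetical usage with the old-style autograd interface used above,
# assuming contiguous CUDA tensors of the shapes the asserts demand:
#	input1: (batches, depth, height, width)
#	input2, input3: (batches, 51, height - 50, width - 50)
#
#	output = SeparableConvolution()(input1, input2, input3)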