/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/reduce.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/warp_shuffle.hpp"

namespace cv { namespace gpu { namespace device
{
    // Other values are not supported
    #define CELL_WIDTH 8
    #define CELL_HEIGHT 8
    #define CELLS_PER_BLOCK_X 2
    #define CELLS_PER_BLOCK_Y 2

    namespace hog
    {
        __constant__ int cnbins;
        __constant__ int cblock_stride_x;
        __constant__ int cblock_stride_y;
        __constant__ int cnblocks_win_x;
        __constant__ int cnblocks_win_y;
        __constant__ int cblock_hist_size;
        __constant__ int cblock_hist_size_2up;
        __constant__ int cdescr_size;
        __constant__ int cdescr_width;
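
        // Typical calling sequence, driven by the host-side HOGDescriptor code
        // and sketched here for orientation only:
        //   set_up_constants(...)                - once per detector configuration
        //   resize_8UC1/8UC4(...)                - scale pyramid levels
        //   compute_gradients_8UC1/8UC4(...)     - per image / per scale
        //   compute_hists(...)                   - cell/block histograms
        //   normalize_hists(...)                 - L2-Hys block normalization
        //   classify_hists(...) / compute_confidence_hists(...) - sliding-window SVM
        //   extract_descrs_by_rows/cols(...)     - raw descriptors, if requested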


        /* Returns the nearest upper power of two; works only for
        the typical per-block GPU thread count values */
        int power_2up(unsigned int n)
        {
            if (n <= 1) return 1;
            else if (n <= 2) return 2;
            else if (n <= 4) return 4;
            else if (n <= 8) return 8;
            else if (n <= 16) return 16;
            else if (n <= 32) return 32;
            else if (n <= 64) return 64;
            else if (n <= 128) return 128;
            else if (n <= 256) return 256;
            else if (n <= 512) return 512;
            else if (n <= 1024) return 1024;

            return -1; // Input is too big
        }
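        // Example: with the default nbins == 9 a block histogram has
        // 9 * 2 * 2 == 36 bins, so power_2up(36) == 64 threads serve each
        // block histogram in normalize_hists() below.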


        void set_up_constants(int nbins, int block_stride_x, int block_stride_y,
                              int nblocks_win_x, int nblocks_win_y)
        {
            cudaSafeCall( cudaMemcpyToSymbol(cnbins, &nbins, sizeof(nbins)) );
            cudaSafeCall( cudaMemcpyToSymbol(cblock_stride_x, &block_stride_x, sizeof(block_stride_x)) );
            cudaSafeCall( cudaMemcpyToSymbol(cblock_stride_y, &block_stride_y, sizeof(block_stride_y)) );
            cudaSafeCall( cudaMemcpyToSymbol(cnblocks_win_x, &nblocks_win_x, sizeof(nblocks_win_x)) );
            cudaSafeCall( cudaMemcpyToSymbol(cnblocks_win_y, &nblocks_win_y, sizeof(nblocks_win_y)) );

            int block_hist_size = nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y;
            cudaSafeCall( cudaMemcpyToSymbol(cblock_hist_size, &block_hist_size, sizeof(block_hist_size)) );

            int block_hist_size_2up = power_2up(block_hist_size);
            cudaSafeCall( cudaMemcpyToSymbol(cblock_hist_size_2up, &block_hist_size_2up, sizeof(block_hist_size_2up)) );

            int descr_width = nblocks_win_x * block_hist_size;
            cudaSafeCall( cudaMemcpyToSymbol(cdescr_width, &descr_width, sizeof(descr_width)) );

            int descr_size = descr_width * nblocks_win_y;
            cudaSafeCall( cudaMemcpyToSymbol(cdescr_size, &descr_size, sizeof(descr_size)) );
        }
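
        // Worked example for the canonical 64x128 window (9 bins, 8x8 cells,
        // 16x16 blocks, 8x8 block stride): block_hist_size = 9 * 2 * 2 = 36,
        // descr_width = 7 * 36 = 252, descr_size = 252 * 15 = 3780 floats,
        // the classic HOG descriptor length.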


        //----------------------------------------------------------------------------
        // Histogram computation
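
        // Thread layout is (32, 2, nblocks): threadIdx.z selects the block,
        // threadIdx.x / 16 and threadIdx.y select the 8x8 cell inside the
        // 16x16 block, and 12 of the 16 x-threads per cell row accumulate
        // Gaussian-weighted, linearly interpolated votes into per-thread
        // partial histograms in shared memory before the final reduction.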


        template <int nblocks> // Number of histogram blocks processed by single GPU thread block
        __global__ void compute_hists_kernel_many_blocks(const int img_block_width, const PtrStepf grad,
                                                         const PtrStepb qangle, float scale, float* block_hists)
        {
            const int block_x = threadIdx.z;
            const int cell_x = threadIdx.x / 16;
            const int cell_y = threadIdx.y;
            const int cell_thread_x = threadIdx.x & 0xF;

            if (blockIdx.x * blockDim.z + block_x >= img_block_width)
                return;

            extern __shared__ float smem[];
            float* hists = smem;
            float* final_hist = smem + cnbins * 48 * nblocks;

            const int offset_x = (blockIdx.x * blockDim.z + block_x) * cblock_stride_x +
                                 4 * cell_x + cell_thread_x;
            const int offset_y = blockIdx.y * cblock_stride_y + 4 * cell_y;

            const float* grad_ptr = grad.ptr(offset_y) + offset_x * 2;
            const unsigned char* qangle_ptr = qangle.ptr(offset_y) + offset_x * 2;

            // 12 means that 12 pixels affect the block's cell (in one row)
            if (cell_thread_x < 12)
            {
                float* hist = hists + 12 * (cell_y * blockDim.z * CELLS_PER_BLOCK_Y +
                                            cell_x + block_x * CELLS_PER_BLOCK_X) +
                                           cell_thread_x;
                for (int bin_id = 0; bin_id < cnbins; ++bin_id)
                    hist[bin_id * 48 * nblocks] = 0.f;

                const int dist_x = -4 + (int)cell_thread_x - 4 * cell_x;

                const int dist_y_begin = -4 - 4 * (int)threadIdx.y;
                for (int dist_y = dist_y_begin; dist_y < dist_y_begin + 12; ++dist_y)
                {
                    float2 vote = *(const float2*)grad_ptr;
                    uchar2 bin = *(const uchar2*)qangle_ptr;

                    grad_ptr += grad.step/sizeof(float);
                    qangle_ptr += qangle.step;

                    int dist_center_y = dist_y - 4 * (1 - 2 * cell_y);
                    int dist_center_x = dist_x - 4 * (1 - 2 * cell_x);

                    float gaussian = ::expf(-(dist_center_y * dist_center_y +
                                              dist_center_x * dist_center_x) * scale);
                    float interp_weight = (8.f - ::fabs(dist_y + 0.5f)) *
                                          (8.f - ::fabs(dist_x + 0.5f)) / 64.f;

                    hist[bin.x * 48 * nblocks] += gaussian * interp_weight * vote.x;
                    hist[bin.y * 48 * nblocks] += gaussian * interp_weight * vote.y;
                }

                volatile float* hist_ = hist;
                for (int bin_id = 0; bin_id < cnbins; ++bin_id, hist_ += 48 * nblocks)
                {
                    if (cell_thread_x < 6) hist_[0] += hist_[6];
                    if (cell_thread_x < 3) hist_[0] += hist_[3];
                    if (cell_thread_x == 0)
                        final_hist[((cell_x + block_x * 2) * 2 + cell_y) * cnbins + bin_id]
                            = hist_[0] + hist_[1] + hist_[2];
                }
            }

            __syncthreads();

            float* block_hist = block_hists + (blockIdx.y * img_block_width +
                                               blockIdx.x * blockDim.z + block_x) *
                                              cblock_hist_size;

            int tid = (cell_y * CELLS_PER_BLOCK_Y + cell_x) * 16 + cell_thread_x;
            if (tid < cblock_hist_size)
                block_hist[tid] = final_hist[block_x * cblock_hist_size + tid];
        }


        void compute_hists(int nbins, int block_stride_x, int block_stride_y,
                           int height, int width, const PtrStepSzf& grad,
                           const PtrStepSzb& qangle, float sigma, float* block_hists)
        {
            const int nblocks = 1;

            int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) /
                                  block_stride_x;
            int img_block_height = (height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y) /
                                   block_stride_y;

            dim3 grid(divUp(img_block_width, nblocks), img_block_height);
            dim3 threads(32, 2, nblocks);

            cudaSafeCall(cudaFuncSetCacheConfig(compute_hists_kernel_many_blocks<nblocks>,
                                                cudaFuncCachePreferL1));

            // Precompute gaussian spatial window parameter
            float scale = 1.f / (2.f * sigma * sigma);

            int hists_size = (nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y * 12 * nblocks) * sizeof(float);
            int final_hists_size = (nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y * nblocks) * sizeof(float);
            int smem = hists_size + final_hists_size;
            compute_hists_kernel_many_blocks<nblocks><<<grid, threads, smem>>>(
                img_block_width, grad, qangle, scale, block_hists);
            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );
        }


        //-------------------------------------------------------------
        //  Normalization of histograms via L2Hys_norm
        //
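        // L2-Hys: L2-normalize each block histogram (with a small regularizer),
        // clamp every bin to `threshold` (0.2 in Dalal & Triggs), then
        // renormalize; see normalize_hists_kernel_many_blocks below.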


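        // Block-wide sum reduction helper; for size == 32 on sm_30+ the result
        // is broadcast to every lane with a warp shuffle instead of a shared
        // memory round trip.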
        template<int size>
        __device__ float reduce_smem(float* smem, float val)
        {
            unsigned int tid = threadIdx.x;
            float sum = val;

            reduce<size>(smem, sum, tid, plus<float>());

            if (size == 32)
            {
            #if __CUDA_ARCH__ >= 300
                return shfl(sum, 0);
            #else
                return smem[0];
            #endif
            }
            else
            {
            #if __CUDA_ARCH__ >= 300
                if (threadIdx.x == 0)
                    smem[0] = sum;
            #endif

                __syncthreads();

                return smem[0];
            }
        }


        template <int nthreads, // Number of threads which process one block histogram
                  int nblocks> // Number of block histograms processed by one GPU thread block
        __global__ void normalize_hists_kernel_many_blocks(const int block_hist_size,
                                                           const int img_block_width,
                                                           float* block_hists, float threshold)
        {
            if (blockIdx.x * blockDim.z + threadIdx.z >= img_block_width)
                return;

            float* hist = block_hists + (blockIdx.y * img_block_width +
                                         blockIdx.x * blockDim.z + threadIdx.z) *
                                        block_hist_size + threadIdx.x;

            __shared__ float sh_squares[nthreads * nblocks];
            float* squares = sh_squares + threadIdx.z * nthreads;

            float elem = 0.f;
            if (threadIdx.x < block_hist_size)
                elem = hist[0];

            float sum = reduce_smem<nthreads>(squares, elem * elem);

            float scale = 1.0f / (::sqrtf(sum) + 0.1f * block_hist_size);
            elem = ::min(elem * scale, threshold);

            sum = reduce_smem<nthreads>(squares, elem * elem);

            scale = 1.0f / (::sqrtf(sum) + 1e-3f);

            if (threadIdx.x < block_hist_size)
                hist[0] = elem * scale;
        }


        void normalize_hists(int nbins, int block_stride_x, int block_stride_y,
                             int height, int width, float* block_hists, float threshold)
        {
            const int nblocks = 1;

            int block_hist_size = nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y;
            int nthreads = power_2up(block_hist_size);
            dim3 threads(nthreads, 1, nblocks);

            int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
            int img_block_height = (height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y) / block_stride_y;
            dim3 grid(divUp(img_block_width, nblocks), img_block_height);

            if (nthreads == 32)
                normalize_hists_kernel_many_blocks<32, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
            else if (nthreads == 64)
                normalize_hists_kernel_many_blocks<64, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
            else if (nthreads == 128)
                normalize_hists_kernel_many_blocks<128, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
            else if (nthreads == 256)
                normalize_hists_kernel_many_blocks<256, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
            else if (nthreads == 512)
                normalize_hists_kernel_many_blocks<512, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
            else
                cv::gpu::error("normalize_hists: histogram's size is too big, try to decrease number of bins", __FILE__, __LINE__, "normalize_hists");

            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );
        }


        //---------------------------------------------------------------------
        //  Linear SVM based classification
        //

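        // Each detection window evaluates the linear SVM decision function
        // f(x) = <coefs, x> + free_coef over its cdescr_size descriptor
        // entries; compute_confidence_hists stores f(x) itself, while
        // classify_hists stores the thresholded label f(x) >= threshold.
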
        // Returns confidence values, not just positive locations
        template <int nthreads, // Number of threads per one histogram block
                  int nblocks>  // Number of histogram blocks processed by single GPU thread block
        __global__ void compute_confidence_hists_kernel_many_blocks(const int img_win_width, const int img_block_width,
                                                                    const int win_block_stride_x, const int win_block_stride_y,
                                                                    const float* block_hists, const float* coefs,
                                                                    float free_coef, float threshold, float* confidences)
        {
            const int win_x = threadIdx.z;
            if (blockIdx.x * blockDim.z + win_x >= img_win_width)
                return;

            const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
                                               blockIdx.x * win_block_stride_x * blockDim.z + win_x) *
                                              cblock_hist_size;

            float product = 0.f;
            for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
            {
                int offset_y = i / cdescr_width;
                int offset_x = i - offset_y * cdescr_width;
                product += coefs[i] * hist[offset_y * img_block_width * cblock_hist_size + offset_x];
            }

            __shared__ float products[nthreads * nblocks];

            const int tid = threadIdx.z * nthreads + threadIdx.x;

            reduce<nthreads>(products, product, tid, plus<float>());

            if (threadIdx.x == 0)
                confidences[blockIdx.y * img_win_width + blockIdx.x * blockDim.z + win_x] = product + free_coef;
        }


        void compute_confidence_hists(int win_height, int win_width, int block_stride_y, int block_stride_x,
                                      int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
                                      float* coefs, float free_coef, float threshold, float* confidences)
        {
            const int nthreads = 256;
            const int nblocks = 1;

            int win_block_stride_x = win_stride_x / block_stride_x;
            int win_block_stride_y = win_stride_y / block_stride_y;
            int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
            int img_win_height = (height - win_height + win_stride_y) / win_stride_y;

            dim3 threads(nthreads, 1, nblocks);
            dim3 grid(divUp(img_win_width, nblocks), img_win_height);

            cudaSafeCall(cudaFuncSetCacheConfig(compute_confidence_hists_kernel_many_blocks<nthreads, nblocks>,
                                                cudaFuncCachePreferL1));

            int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) /
                                  block_stride_x;
            compute_confidence_hists_kernel_many_blocks<nthreads, nblocks><<<grid, threads>>>(
                img_win_width, img_block_width, win_block_stride_x, win_block_stride_y,
                block_hists, coefs, free_coef, threshold, confidences);
            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );
        }



        template <int nthreads, // Number of threads per one histogram block
                  int nblocks>  // Number of histogram blocks processed by single GPU thread block
        __global__ void classify_hists_kernel_many_blocks(const int img_win_width, const int img_block_width,
                                                          const int win_block_stride_x, const int win_block_stride_y,
                                                          const float* block_hists, const float* coefs,
                                                          float free_coef, float threshold, unsigned char* labels)
        {
            const int win_x = threadIdx.z;
            if (blockIdx.x * blockDim.z + win_x >= img_win_width)
                return;

            const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
                                               blockIdx.x * win_block_stride_x * blockDim.z + win_x) *
                                              cblock_hist_size;

            float product = 0.f;
            for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
            {
                int offset_y = i / cdescr_width;
                int offset_x = i - offset_y * cdescr_width;
                product += coefs[i] * hist[offset_y * img_block_width * cblock_hist_size + offset_x];
            }

            __shared__ float products[nthreads * nblocks];

            const int tid = threadIdx.z * nthreads + threadIdx.x;

            reduce<nthreads>(products, product, tid, plus<float>());

            if (threadIdx.x == 0)
                labels[blockIdx.y * img_win_width + blockIdx.x * blockDim.z + win_x] = (product + free_coef >= threshold);
        }


        void classify_hists(int win_height, int win_width, int block_stride_y, int block_stride_x,
                            int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
                            float* coefs, float free_coef, float threshold, unsigned char* labels)
        {
            const int nthreads = 256;
            const int nblocks = 1;

            int win_block_stride_x = win_stride_x / block_stride_x;
            int win_block_stride_y = win_stride_y / block_stride_y;
            int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
            int img_win_height = (height - win_height + win_stride_y) / win_stride_y;

            dim3 threads(nthreads, 1, nblocks);
            dim3 grid(divUp(img_win_width, nblocks), img_win_height);

            cudaSafeCall(cudaFuncSetCacheConfig(classify_hists_kernel_many_blocks<nthreads, nblocks>, cudaFuncCachePreferL1));

            int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
            classify_hists_kernel_many_blocks<nthreads, nblocks><<<grid, threads>>>(
                img_win_width, img_block_width, win_block_stride_x, win_block_stride_y,
                block_hists, coefs, free_coef, threshold, labels);
            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );
        }

        //----------------------------------------------------------------------------
        // Extract descriptors


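        // Two copy orders are provided: extract_descrs_by_rows keeps the block
        // histograms in image row order, while extract_descrs_by_cols writes
        // them in (x * cnblocks_win_y + y) column-major block order.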
        template <int nthreads>
        __global__ void extract_descrs_by_rows_kernel(const int img_block_width, const int win_block_stride_x, const int win_block_stride_y,
                                                      const float* block_hists, PtrStepf descriptors)
        {
            // Get left top corner of the window in src
            const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
                                               blockIdx.x * win_block_stride_x) * cblock_hist_size;

            // Get left top corner of the window in dst
            float* descriptor = descriptors.ptr(blockIdx.y * gridDim.x + blockIdx.x);

            // Copy elements from src to dst
            for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
            {
                int offset_y = i / cdescr_width;
                int offset_x = i - offset_y * cdescr_width;
                descriptor[i] = hist[offset_y * img_block_width * cblock_hist_size + offset_x];
            }
        }


        void extract_descrs_by_rows(int win_height, int win_width, int block_stride_y, int block_stride_x, int win_stride_y, int win_stride_x,
                                    int height, int width, float* block_hists, PtrStepSzf descriptors)
        {
            const int nthreads = 256;

            int win_block_stride_x = win_stride_x / block_stride_x;
            int win_block_stride_y = win_stride_y / block_stride_y;
            int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
            int img_win_height = (height - win_height + win_stride_y) / win_stride_y;
            dim3 threads(nthreads, 1);
            dim3 grid(img_win_width, img_win_height);

            int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
            extract_descrs_by_rows_kernel<nthreads><<<grid, threads>>>(
                img_block_width, win_block_stride_x, win_block_stride_y, block_hists, descriptors);
            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );
        }


        template <int nthreads>
        __global__ void extract_descrs_by_cols_kernel(const int img_block_width, const int win_block_stride_x,
                                                      const int win_block_stride_y, const float* block_hists,
                                                      PtrStepf descriptors)
        {
            // Get left top corner of the window in src
            const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
                                               blockIdx.x * win_block_stride_x) * cblock_hist_size;

            // Get left top corner of the window in dst
            float* descriptor = descriptors.ptr(blockIdx.y * gridDim.x + blockIdx.x);

            // Copy elements from src to dst
            for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
            {
                int block_idx = i / cblock_hist_size;
                int idx_in_block = i - block_idx * cblock_hist_size;

                int y = block_idx / cnblocks_win_x;
                int x = block_idx - y * cnblocks_win_x;

                descriptor[(x * cnblocks_win_y + y) * cblock_hist_size + idx_in_block]
                    = hist[(y * img_block_width  + x) * cblock_hist_size + idx_in_block];
            }
        }


        void extract_descrs_by_cols(int win_height, int win_width, int block_stride_y, int block_stride_x,
                                    int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
                                    PtrStepSzf descriptors)
        {
            const int nthreads = 256;

            int win_block_stride_x = win_stride_x / block_stride_x;
            int win_block_stride_y = win_stride_y / block_stride_y;
            int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
            int img_win_height = (height - win_height + win_stride_y) / win_stride_y;
            dim3 threads(nthreads, 1);
            dim3 grid(img_win_width, img_win_height);

            int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
            extract_descrs_by_cols_kernel<nthreads><<<grid, threads>>>(
                img_block_width, win_block_stride_x, win_block_stride_y, block_hists, descriptors);
            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );
        }

        //----------------------------------------------------------------------------
        // Gradients computation


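        // Orientation binning: ang = (atan2f(dy, dx) + CV_PI_F) * angle_scale - 0.5f
        // picks the lower bin hidx, and the fractional remainder splits the
        // magnitude linearly between the two adjacent bins (hidx and
        // (hidx + 1) % cnbins); the modulo wrap makes the binning circular.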
        template <int nthreads, int correct_gamma>
        __global__ void compute_gradients_8UC4_kernel(int height, int width, const PtrStepb img,
                                                      float angle_scale, PtrStepf grad, PtrStepb qangle)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;

            const uchar4* row = (const uchar4*)img.ptr(blockIdx.y);

            __shared__ float sh_row[(nthreads + 2) * 3];

            uchar4 val;
            if (x < width)
                val = row[x];
            else
                val = row[width - 2];

            sh_row[threadIdx.x + 1] = val.x;
            sh_row[threadIdx.x + 1 + (nthreads + 2)] = val.y;
            sh_row[threadIdx.x + 1 + 2 * (nthreads + 2)] = val.z;

            if (threadIdx.x == 0)
            {
                val = row[::max(x - 1, 1)];
                sh_row[0] = val.x;
                sh_row[(nthreads + 2)] = val.y;
                sh_row[2 * (nthreads + 2)] = val.z;
            }

            if (threadIdx.x == blockDim.x - 1)
            {
                val = row[::min(x + 1, width - 2)];
                sh_row[blockDim.x + 1] = val.x;
                sh_row[blockDim.x + 1 + (nthreads + 2)] = val.y;
                sh_row[blockDim.x + 1 + 2 * (nthreads + 2)] = val.z;
            }

            __syncthreads();
            if (x < width)
            {
                float3 a, b;

                b.x = sh_row[threadIdx.x + 2];
                b.y = sh_row[threadIdx.x + 2 + (nthreads + 2)];
                b.z = sh_row[threadIdx.x + 2 + 2 * (nthreads + 2)];
                a.x = sh_row[threadIdx.x];
                a.y = sh_row[threadIdx.x + (nthreads + 2)];
                a.z = sh_row[threadIdx.x + 2 * (nthreads + 2)];

                float3 dx;
                if (correct_gamma)
                    dx = make_float3(::sqrtf(b.x) - ::sqrtf(a.x), ::sqrtf(b.y) - ::sqrtf(a.y), ::sqrtf(b.z) - ::sqrtf(a.z));
                else
                    dx = make_float3(b.x - a.x, b.y - a.y, b.z - a.z);

                float3 dy = make_float3(0.f, 0.f, 0.f);

                if (blockIdx.y > 0 && blockIdx.y < height - 1)
                {
                    val = ((const uchar4*)img.ptr(blockIdx.y - 1))[x];
                    a = make_float3(val.x, val.y, val.z);

                    val = ((const uchar4*)img.ptr(blockIdx.y + 1))[x];
                    b = make_float3(val.x, val.y, val.z);

                    if (correct_gamma)
                        dy = make_float3(::sqrtf(b.x) - ::sqrtf(a.x), ::sqrtf(b.y) - ::sqrtf(a.y), ::sqrtf(b.z) - ::sqrtf(a.z));
                    else
                        dy = make_float3(b.x - a.x, b.y - a.y, b.z - a.z);
                }

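                // Keep the channel with the largest gradient magnitude
                // (color handling as in Dalal & Triggs).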
                float best_dx = dx.x;
                float best_dy = dy.x;

                float mag0 = dx.x * dx.x + dy.x * dy.x;
                float mag1 = dx.y * dx.y + dy.y * dy.y;
                if (mag0 < mag1)
                {
                    best_dx = dx.y;
                    best_dy = dy.y;
                    mag0 = mag1;
                }

                mag1 = dx.z * dx.z + dy.z * dy.z;
                if (mag0 < mag1)
                {
                    best_dx = dx.z;
                    best_dy = dy.z;
                    mag0 = mag1;
                }

                mag0 = ::sqrtf(mag0);

                float ang = (::atan2f(best_dy, best_dx) + CV_PI_F) * angle_scale - 0.5f;
                int hidx = (int)::floorf(ang);
                ang -= hidx;
                hidx = (hidx + cnbins) % cnbins;

                ((uchar2*)qangle.ptr(blockIdx.y))[x] = make_uchar2(hidx, (hidx + 1) % cnbins);
                ((float2*)grad.ptr(blockIdx.y))[x] = make_float2(mag0 * (1.f - ang), mag0 * ang);
            }
        }


        void compute_gradients_8UC4(int nbins, int height, int width, const PtrStepSzb& img,
                                    float angle_scale, PtrStepSzf grad, PtrStepSzb qangle, bool correct_gamma)
        {
            (void)nbins;
            const int nthreads = 256;

            dim3 bdim(nthreads, 1);
            dim3 gdim(divUp(width, bdim.x), divUp(height, bdim.y));

            if (correct_gamma)
                compute_gradients_8UC4_kernel<nthreads, 1><<<gdim, bdim>>>(height, width, img, angle_scale, grad, qangle);
            else
                compute_gradients_8UC4_kernel<nthreads, 0><<<gdim, bdim>>>(height, width, img, angle_scale, grad, qangle);

            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );
        }

        template <int nthreads, int correct_gamma>
        __global__ void compute_gradients_8UC1_kernel(int height, int width, const PtrStepb img,
                                                      float angle_scale, PtrStepf grad, PtrStepb qangle)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;

            const unsigned char* row = (const unsigned char*)img.ptr(blockIdx.y);

            __shared__ float sh_row[nthreads + 2];

            if (x < width)
                sh_row[threadIdx.x + 1] = row[x];
            else
                sh_row[threadIdx.x + 1] = row[width - 2];

            if (threadIdx.x == 0)
                sh_row[0] = row[::max(x - 1, 1)];

            if (threadIdx.x == blockDim.x - 1)
                sh_row[blockDim.x + 1] = row[::min(x + 1, width - 2)];

            __syncthreads();
            if (x < width)
            {
                float dx;

                if (correct_gamma)
                    dx = ::sqrtf(sh_row[threadIdx.x + 2]) - ::sqrtf(sh_row[threadIdx.x]);
                else
                    dx = sh_row[threadIdx.x + 2] - sh_row[threadIdx.x];

                float dy = 0.f;
                if (blockIdx.y > 0 && blockIdx.y < height - 1)
                {
                    float a = ((const unsigned char*)img.ptr(blockIdx.y + 1))[x];
                    float b = ((const unsigned char*)img.ptr(blockIdx.y - 1))[x];
                    if (correct_gamma)
                        dy = ::sqrtf(a) - ::sqrtf(b);
                    else
                        dy = a - b;
                }
                float mag = ::sqrtf(dx * dx + dy * dy);

                float ang = (::atan2f(dy, dx) + CV_PI_F) * angle_scale - 0.5f;
                int hidx = (int)::floorf(ang);
                ang -= hidx;
                hidx = (hidx + cnbins) % cnbins;

                ((uchar2*)qangle.ptr(blockIdx.y))[x] = make_uchar2(hidx, (hidx + 1) % cnbins);
                ((float2*)  grad.ptr(blockIdx.y))[x] = make_float2(mag * (1.f - ang), mag * ang);
            }
        }


        void compute_gradients_8UC1(int nbins, int height, int width, const PtrStepSzb& img,
                                    float angle_scale, PtrStepSzf grad, PtrStepSzb qangle, bool correct_gamma)
        {
            (void)nbins;
            const int nthreads = 256;

            dim3 bdim(nthreads, 1);
            dim3 gdim(divUp(width, bdim.x), divUp(height, bdim.y));

            if (correct_gamma)
                compute_gradients_8UC1_kernel<nthreads, 1><<<gdim, bdim>>>(height, width, img, angle_scale, grad, qangle);
            else
                compute_gradients_8UC1_kernel<nthreads, 0><<<gdim, bdim>>>(height, width, img, angle_scale, grad, qangle);

            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );
        }



        //-------------------------------------------------------------------
        // Resize

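        // Bilinear resampling through the texture hardware: both textures are
        // bound with cudaReadModeNormalizedFloat, so tex2D returns values in
        // [0, 1] and the kernels scale back by 255 when writing 8-bit output.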
        texture<uchar4, 2, cudaReadModeNormalizedFloat> resize8UC4_tex;
        texture<uchar,  2, cudaReadModeNormalizedFloat> resize8UC1_tex;

        __global__ void resize_for_hog_kernel(float sx, float sy, PtrStepSz<uchar> dst, int colOfs)
        {
            unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
            unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < dst.cols && y < dst.rows)
                dst.ptr(y)[x] = tex2D(resize8UC1_tex, x * sx + colOfs, y * sy) * 255;
        }

        __global__ void resize_for_hog_kernel(float sx, float sy, PtrStepSz<uchar4> dst, int colOfs)
        {
            unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
            unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < dst.cols && y < dst.rows)
            {
                float4 val = tex2D(resize8UC4_tex, x * sx + colOfs, y * sy);
                dst.ptr(y)[x] = make_uchar4(val.x * 255, val.y * 255, val.z * 255, val.w * 255);
            }
        }

        template<class T, class TEX>
        static void resize_for_hog(const PtrStepSzb& src, PtrStepSzb dst, TEX& tex)
        {
            tex.filterMode = cudaFilterModeLinear;

            size_t texOfs = 0;
            int colOfs = 0;

            cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
            cudaSafeCall( cudaBindTexture2D(&texOfs, tex, src.data, desc, src.cols, src.rows, src.step) );

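            // If the driver could not bind at offset 0 (src.data not aligned
            // to the texture alignment), address with a column offset instead.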
            if (texOfs != 0)
            {
                colOfs = static_cast<int>( texOfs/sizeof(T) );
                cudaSafeCall( cudaUnbindTexture(tex) );
                cudaSafeCall( cudaBindTexture2D(&texOfs, tex, src.data, desc, src.cols, src.rows, src.step) );
            }

            dim3 threads(32, 8);
            dim3 grid(divUp(dst.cols, threads.x), divUp(dst.rows, threads.y));

            float sx = static_cast<float>(src.cols) / dst.cols;
            float sy = static_cast<float>(src.rows) / dst.rows;

            resize_for_hog_kernel<<<grid, threads>>>(sx, sy, (PtrStepSz<T>)dst, colOfs);
            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );

            cudaSafeCall( cudaUnbindTexture(tex) );
        }

        void resize_8UC1(const PtrStepSzb& src, PtrStepSzb dst) { resize_for_hog<uchar> (src, dst, resize8UC1_tex); }
        void resize_8UC4(const PtrStepSzb& src, PtrStepSzb dst) { resize_for_hog<uchar4>(src, dst, resize8UC4_tex); }
    } // namespace hog
}}} // namespace cv { namespace gpu { namespace device


#endif /* CUDA_DISABLER */