/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/reduce.hpp"
#include "opencv2/gpu/device/functional.hpp"

namespace cv { namespace gpu { namespace device
{
    namespace stereocsbp
    {
        ///////////////////////////////////////////////////////////////
        /////////////////////// load constants ////////////////////////
        ///////////////////////////////////////////////////////////////

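        // All per-call parameters live in constant memory. Every buffer is addressed as
        // ptr + y * cmsg_step + x; consecutive disparity planes of the same pixel are
        // cdisp_step1 elements apart (cdisp_step2 for buffers of the coarser pyramid level).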
        __constant__ int cndisp;

        __constant__ float cmax_data_term;
        __constant__ float cdata_weight;
        __constant__ float cmax_disc_term;
        __constant__ float cdisc_single_jump;

        __constant__ int cth;

        __constant__ size_t cimg_step;
        __constant__ size_t cmsg_step;
        __constant__ size_t cdisp_step1;
        __constant__ size_t cdisp_step2;

        __constant__ uchar* cleft;
        __constant__ uchar* cright;
        __constant__ uchar* ctemp;


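        // Uploads the scalar parameters and image pointers into the constant memory declared above.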
        void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, int min_disp_th,
                            const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& temp)
        {
            cudaSafeCall( cudaMemcpyToSymbol(cndisp, &ndisp, sizeof(int)) );

            cudaSafeCall( cudaMemcpyToSymbol(cmax_data_term,    &max_data_term,    sizeof(float)) );
            cudaSafeCall( cudaMemcpyToSymbol(cdata_weight,      &data_weight,      sizeof(float)) );
            cudaSafeCall( cudaMemcpyToSymbol(cmax_disc_term,    &max_disc_term,    sizeof(float)) );
            cudaSafeCall( cudaMemcpyToSymbol(cdisc_single_jump, &disc_single_jump, sizeof(float)) );

            cudaSafeCall( cudaMemcpyToSymbol(cth, &min_disp_th, sizeof(int)) );

            cudaSafeCall( cudaMemcpyToSymbol(cimg_step, &left.step, sizeof(size_t)) );

            cudaSafeCall( cudaMemcpyToSymbol(cleft,  &left.data,  sizeof(left.data)) );
            cudaSafeCall( cudaMemcpyToSymbol(cright, &right.data, sizeof(right.data)) );
            cudaSafeCall( cudaMemcpyToSymbol(ctemp, &temp.data, sizeof(temp.data)) );
        }

        ///////////////////////////////////////////////////////////////
        /////////////////////// init data cost ////////////////////////
        ///////////////////////////////////////////////////////////////

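        // Truncated per-pixel matching cost: plain absolute difference for 1-channel images,
        // a luma-weighted (0.299/0.587/0.114) sum of per-channel absolute differences for
        // 3- and 4-channel images, both clamped at cdata_weight * cmax_data_term.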
        template <int channels> struct DataCostPerPixel;
        template <> struct DataCostPerPixel<1>
        {
            static __device__ __forceinline__ float compute(const uchar* left, const uchar* right)
            {
                int l = *(left);
                int r = *(right);

                return fmin(cdata_weight * ::abs(l - r), cdata_weight * cmax_data_term);
            }
        };
        template <> struct DataCostPerPixel<3>
        {
            static __device__ __forceinline__ float compute(const uchar* left, const uchar* right)
            {
                uchar3 l = *((const uchar3*)left);
                uchar3 r = *((const uchar3*)right);

                float tb = 0.114f * ::abs((int)l.x - r.x);
                float tg = 0.587f * ::abs((int)l.y - r.y);
                float tr = 0.299f * ::abs((int)l.z - r.z);

                return fmin(cdata_weight * (tr + tg + tb), cdata_weight * cmax_data_term);
            }
        };
        template <> struct DataCostPerPixel<4>
        {
            static __device__ __forceinline__ float compute(const uchar* left, const uchar* right)
            {
                uchar4 l = *((const uchar4*)left);
                uchar4 r = *((const uchar4*)right);

                float tb = 0.114f * ::abs((int)l.x - r.x);
                float tg = 0.587f * ::abs((int)l.y - r.y);
                float tr = 0.299f * ::abs((int)l.z - r.z);

                return fmin(cdata_weight * (tr + tg + tb), cdata_weight * cmax_data_term);
            }
        };

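        // Initial plane selection, global variant: for every pixel, repeatedly pick the disparity
        // with the smallest data cost until nr_plane disparities have been selected.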
        template <typename T>
        __global__ void get_first_k_initial_global(T* data_cost_selected_, T *selected_disp_pyr, int h, int w, int nr_plane)
        {
            int x = blockIdx.x * blockDim.x + threadIdx.x;
            int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (y < h && x < w)
            {
                T* selected_disparity = selected_disp_pyr + y * cmsg_step + x;
                T* data_cost_selected = data_cost_selected_ + y * cmsg_step + x;
                T* data_cost = (T*)ctemp + y * cmsg_step + x;

                for(int i = 0; i < nr_plane; i++)
                {
                    T minimum = device::numeric_limits<T>::max();
                    int id = 0;
                    for(int d = 0; d < cndisp; d++)
                    {
                        T cur = data_cost[d * cdisp_step1];
                        if(cur < minimum)
                        {
                            minimum = cur;
                            id = d;
                        }
                    }

                    data_cost_selected[i  * cdisp_step1] = minimum;
                    selected_disparity[i  * cdisp_step1] = id;
                    data_cost         [id * cdisp_step1] = numeric_limits<T>::max();
                }
            }
        }


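        // Initial plane selection, local variant: first take up to nr_plane local minima of the
        // data cost along the disparity axis, then fill any remaining slots with the globally
        // smallest of the remaining costs.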
        template <typename T>
        __global__ void get_first_k_initial_local(T* data_cost_selected_, T* selected_disp_pyr, int h, int w, int nr_plane)
        {
            int x = blockIdx.x * blockDim.x + threadIdx.x;
            int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (y < h && x < w)
            {
                T* selected_disparity = selected_disp_pyr + y * cmsg_step + x;
                T* data_cost_selected = data_cost_selected_ + y * cmsg_step + x;
                T* data_cost = (T*)ctemp + y * cmsg_step + x;

                int nr_local_minimum = 0;

                T prev = data_cost[0 * cdisp_step1];
                T cur  = data_cost[1 * cdisp_step1];
                T next = data_cost[2 * cdisp_step1];

                for (int d = 1; d < cndisp - 1 && nr_local_minimum < nr_plane; d++)
                {
                    if (cur < prev && cur < next)
                    {
                        data_cost_selected[nr_local_minimum * cdisp_step1] = cur;
                        selected_disparity[nr_local_minimum * cdisp_step1] = d;

                        data_cost[d * cdisp_step1] = numeric_limits<T>::max();

                        nr_local_minimum++;
                    }
                    prev = cur;
                    cur = next;
                    next = data_cost[(d + 1) * cdisp_step1];
                }

                for (int i = nr_local_minimum; i < nr_plane; i++)
                {
                    T minimum = numeric_limits<T>::max();
                    int id = 0;

                    for (int d = 0; d < cndisp; d++)
                    {
                        cur = data_cost[d * cdisp_step1];
                        if (cur < minimum)
                        {
                            minimum = cur;
                            id = d;
                        }
                    }
                    data_cost_selected[i * cdisp_step1] = minimum;
                    selected_disparity[i * cdisp_step1] = id;

                    data_cost[id * cdisp_step1] = numeric_limits<T>::max();
                }
            }
        }

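        // Data cost at the coarsest level: each coarse pixel (x, y) aggregates the per-pixel cost
        // over the (1 << level) x (1 << level) block of full-resolution pixels it covers, for all
        // cndisp disparities.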
        template <typename T, int channels>
        __global__ void init_data_cost(int h, int w, int level)
        {
            int x = blockIdx.x * blockDim.x + threadIdx.x;
            int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (y < h && x < w)
            {
                int y0 = y << level;
                int yt = (y + 1) << level;

                int x0 = x << level;
                int xt = (x + 1) << level;

                T* data_cost = (T*)ctemp + y * cmsg_step + x;

                for(int d = 0; d < cndisp; ++d)
                {
                    float val = 0.0f;
                    for(int yi = y0; yi < yt; yi++)
                    {
                        for(int xi = x0; xi < xt; xi++)
                        {
                            int xr = xi - d;
                            if(d < cth || xr < 0)
                                val += cdata_weight * cmax_data_term;
                            else
                            {
                                const uchar* lle = cleft + yi * cimg_step + xi * channels;
                                const uchar* lri = cright + yi * cimg_step + xr * channels;

                                val += DataCostPerPixel<channels>::compute(lle, lri);
                            }
                        }
                    }
                    data_cost[cdisp_step1 * d] = saturate_cast<T>(val);
                }
            }
        }

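        // Same as init_data_cost, but for large aggregation windows: one thread block per output
        // pixel, each thread sums one column of the window and the per-thread sums are combined
        // with a shared-memory reduction.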
        template <typename T, int winsz, int channels>
        __global__ void init_data_cost_reduce(int level, int rows, int cols, int h)
        {
            int x_out = blockIdx.x;
            int y_out = blockIdx.y % h;
            int d = (blockIdx.y / h) * blockDim.z + threadIdx.z;

            int tid = threadIdx.x;

            if (d < cndisp)
            {
                int x0 = x_out << level;
                int y0 = y_out << level;

                int len = ::min(y0 + winsz, rows) - y0;

                float val = 0.0f;
                if (x0 + tid < cols)
                {
                    if (x0 + tid - d < 0 || d < cth)
                        val = cdata_weight * cmax_data_term * len;
                    else
                    {
                        const uchar* lle =  cleft + y0 * cimg_step + channels * (x0 + tid    );
                        const uchar* lri = cright + y0 * cimg_step + channels * (x0 + tid - d);

                        for(int y = 0; y < len; ++y)
                        {
                            val += DataCostPerPixel<channels>::compute(lle, lri);

                            lle += cimg_step;
                            lri += cimg_step;
                        }
                    }
                }

                extern __shared__ float smem[];

                reduce<winsz>(smem + winsz * threadIdx.z, val, tid, plus<float>());

                T* data_cost = (T*)ctemp + y_out * cmsg_step + x_out;

                if (tid == 0)
                    data_cost[cdisp_step1 * d] = saturate_cast<T>(val);
            }
        }


        template <typename T>
        void init_data_cost_caller_(int /*rows*/, int /*cols*/, int h, int w, int level, int /*ndisp*/, int channels, cudaStream_t stream)
        {
            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(w, threads.x);
            grid.y = divUp(h, threads.y);

            switch (channels)
            {
            case 1: init_data_cost<T, 1><<<grid, threads, 0, stream>>>(h, w, level); break;
            case 3: init_data_cost<T, 3><<<grid, threads, 0, stream>>>(h, w, level); break;
            case 4: init_data_cost<T, 4><<<grid, threads, 0, stream>>>(h, w, level); break;
            default: cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "init_data_cost_caller_");
            }
        }

        template <typename T, int winsz>
        void init_data_cost_reduce_caller_(int rows, int cols, int h, int w, int level, int ndisp, int channels, cudaStream_t stream)
        {
            const int threadsNum = 256;
            const size_t smem_size = threadsNum * sizeof(float);

            dim3 threads(winsz, 1, threadsNum / winsz);
            dim3 grid(w, h, 1);
            grid.y *= divUp(ndisp, threads.z);

            switch (channels)
            {
            case 1: init_data_cost_reduce<T, winsz, 1><<<grid, threads, smem_size, stream>>>(level, rows, cols, h); break;
            case 3: init_data_cost_reduce<T, winsz, 3><<<grid, threads, smem_size, stream>>>(level, rows, cols, h); break;
            case 4: init_data_cost_reduce<T, winsz, 4><<<grid, threads, smem_size, stream>>>(level, rows, cols, h); break;
            default: cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "init_data_cost_reduce_caller_");
            }
        }

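        // Host entry point for the coarsest level: uploads the step constants, launches the
        // data-cost kernel matching the window size (1 << level), then selects the initial
        // nr_plane disparity planes per pixel (local or global variant).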
        template<class T>
        void init_data_cost(int rows, int cols, T* disp_selected_pyr, T* data_cost_selected, size_t msg_step,
                    int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, cudaStream_t stream)
        {

            typedef void (*InitDataCostCaller)(int rows, int cols, int h, int w, int level, int ndisp, int channels, cudaStream_t stream);

            static const InitDataCostCaller init_data_cost_callers[] =
            {
                init_data_cost_caller_<T>, init_data_cost_caller_<T>, init_data_cost_reduce_caller_<T, 4>,
                init_data_cost_reduce_caller_<T, 8>, init_data_cost_reduce_caller_<T, 16>, init_data_cost_reduce_caller_<T, 32>,
                init_data_cost_reduce_caller_<T, 64>, init_data_cost_reduce_caller_<T, 128>, init_data_cost_reduce_caller_<T, 256>
            };

            size_t disp_step = msg_step * h;
            cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step, sizeof(size_t)) );
            cudaSafeCall( cudaMemcpyToSymbol(cmsg_step,  &msg_step,  sizeof(size_t)) );

            init_data_cost_callers[level](rows, cols, h, w, level, ndisp, channels, stream);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );

            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(w, threads.x);
            grid.y = divUp(h, threads.y);

            if (use_local_init_data_cost)
                get_first_k_initial_local<<<grid, threads, 0, stream>>> (data_cost_selected, disp_selected_pyr, h, w, nr_plane);
            else
                get_first_k_initial_global<<<grid, threads, 0, stream>>>(data_cost_selected, disp_selected_pyr, h, w, nr_plane);

            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        template void init_data_cost(int rows, int cols, short* disp_selected_pyr, short* data_cost_selected, size_t msg_step,
                    int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, cudaStream_t stream);

        template void init_data_cost(int rows, int cols, float* disp_selected_pyr, float* data_cost_selected, size_t msg_step,
                    int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, cudaStream_t stream);

        ///////////////////////////////////////////////////////////////
        ////////////////////// compute data cost //////////////////////
        ///////////////////////////////////////////////////////////////

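        // Data cost at finer levels: costs are evaluated only for the nr_plane disparities that
        // were selected at the coarser level (read from selected_disp_pyr at half resolution).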
        template <typename T, int channels>
        __global__ void compute_data_cost(const T* selected_disp_pyr, T* data_cost_, int h, int w, int level, int nr_plane)
        {
            int x = blockIdx.x * blockDim.x + threadIdx.x;
            int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (y < h && x < w)
            {
                int y0 = y << level;
                int yt = (y + 1) << level;

                int x0 = x << level;
                int xt = (x + 1) << level;

                const T* selected_disparity = selected_disp_pyr + y/2 * cmsg_step + x/2;
                T* data_cost = data_cost_ + y * cmsg_step + x;

                for(int d = 0; d < nr_plane; d++)
                {
                    float val = 0.0f;
                    for(int yi = y0; yi < yt; yi++)
                    {
                        for(int xi = x0; xi < xt; xi++)
                        {
                            int sel_disp = selected_disparity[d * cdisp_step2];
                            int xr = xi - sel_disp;

                            if (xr < 0 || sel_disp < cth)
                                val += cdata_weight * cmax_data_term;
                            else
                            {
                                const uchar* left_x = cleft + yi * cimg_step + xi * channels;
                                const uchar* right_x = cright + yi * cimg_step + xr * channels;

                                val += DataCostPerPixel<channels>::compute(left_x, right_x);
                            }
                        }
                    }
                    data_cost[cdisp_step1 * d] = saturate_cast<T>(val);
                }
            }
        }

        template <typename T, int winsz, int channels>
        __global__ void compute_data_cost_reduce(const T* selected_disp_pyr, T* data_cost_, int level, int rows, int cols, int h, int nr_plane)
        {
            int x_out = blockIdx.x;
            int y_out = blockIdx.y % h;
            int d = (blockIdx.y / h) * blockDim.z + threadIdx.z;

            int tid = threadIdx.x;

            const T* selected_disparity = selected_disp_pyr + y_out/2 * cmsg_step + x_out/2;
            T* data_cost = data_cost_ + y_out * cmsg_step + x_out;

            if (d < nr_plane)
            {
                int sel_disp = selected_disparity[d * cdisp_step2];

                int x0 = x_out << level;
                int y0 = y_out << level;

                int len = ::min(y0 + winsz, rows) - y0;

                float val = 0.0f;
                if (x0 + tid < cols)
                {
                    if (x0 + tid - sel_disp < 0 || sel_disp < cth)
                        val = cdata_weight * cmax_data_term * len;
                    else
                    {
                        const uchar* lle =  cleft + y0 * cimg_step + channels * (x0 + tid    );
                        const uchar* lri = cright + y0 * cimg_step + channels * (x0 + tid - sel_disp);

                        for(int y = 0; y < len; ++y)
                        {
                            val += DataCostPerPixel<channels>::compute(lle, lri);

                            lle += cimg_step;
                            lri += cimg_step;
                        }
                    }
                }

                extern __shared__ float smem[];

                reduce<winsz>(smem + winsz * threadIdx.z, val, tid, plus<float>());

                if (tid == 0)
                    data_cost[cdisp_step1 * d] = saturate_cast<T>(val);
            }
        }

        template <typename T>
        void compute_data_cost_caller_(const T* disp_selected_pyr, T* data_cost, int /*rows*/, int /*cols*/,
                                      int h, int w, int level, int nr_plane, int channels, cudaStream_t stream)
        {
            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(w, threads.x);
            grid.y = divUp(h, threads.y);

            switch(channels)
            {
            case 1: compute_data_cost<T, 1><<<grid, threads, 0, stream>>>(disp_selected_pyr, data_cost, h, w, level, nr_plane); break;
            case 3: compute_data_cost<T, 3><<<grid, threads, 0, stream>>>(disp_selected_pyr, data_cost, h, w, level, nr_plane); break;
            case 4: compute_data_cost<T, 4><<<grid, threads, 0, stream>>>(disp_selected_pyr, data_cost, h, w, level, nr_plane); break;
            default: cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "compute_data_cost_caller_");
            }
        }

        template <typename T, int winsz>
        void compute_data_cost_reduce_caller_(const T* disp_selected_pyr, T* data_cost, int rows, int cols,
                                      int h, int w, int level, int nr_plane, int channels, cudaStream_t stream)
        {
            const int threadsNum = 256;
            const size_t smem_size = threadsNum * sizeof(float);

            dim3 threads(winsz, 1, threadsNum / winsz);
            dim3 grid(w, h, 1);
            grid.y *= divUp(nr_plane, threads.z);

            switch (channels)
            {
            case 1: compute_data_cost_reduce<T, winsz, 1><<<grid, threads, smem_size, stream>>>(disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane); break;
            case 3: compute_data_cost_reduce<T, winsz, 3><<<grid, threads, smem_size, stream>>>(disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane); break;
            case 4: compute_data_cost_reduce<T, winsz, 4><<<grid, threads, smem_size, stream>>>(disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane); break;
            default: cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "compute_data_cost_reduce_caller_");
            }
        }

        template<class T>
        void compute_data_cost(const T* disp_selected_pyr, T* data_cost, size_t msg_step,
                               int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, cudaStream_t stream)
        {
            typedef void (*ComputeDataCostCaller)(const T* disp_selected_pyr, T* data_cost, int rows, int cols,
                int h, int w, int level, int nr_plane, int channels, cudaStream_t stream);

            static const ComputeDataCostCaller callers[] =
            {
                compute_data_cost_caller_<T>, compute_data_cost_caller_<T>, compute_data_cost_reduce_caller_<T, 4>,
                compute_data_cost_reduce_caller_<T, 8>, compute_data_cost_reduce_caller_<T, 16>, compute_data_cost_reduce_caller_<T, 32>,
                compute_data_cost_reduce_caller_<T, 64>, compute_data_cost_reduce_caller_<T, 128>, compute_data_cost_reduce_caller_<T, 256>
            };

            size_t disp_step1 = msg_step * h;
            size_t disp_step2 = msg_step * h2;
            cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step1, sizeof(size_t)) );
            cudaSafeCall( cudaMemcpyToSymbol(cdisp_step2, &disp_step2, sizeof(size_t)) );
            cudaSafeCall( cudaMemcpyToSymbol(cmsg_step,  &msg_step,  sizeof(size_t)) );

            callers[level](disp_selected_pyr, data_cost, rows, cols, h, w, level, nr_plane, channels, stream);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        template void compute_data_cost(const short* disp_selected_pyr, short* data_cost, size_t msg_step,
                               int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, cudaStream_t stream);

        template void compute_data_cost(const float* disp_selected_pyr, float* data_cost, size_t msg_step,
                               int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, cudaStream_t stream);


        ///////////////////////////////////////////////////////////////
        //////////////////////// init message /////////////////////////
        ///////////////////////////////////////////////////////////////


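        // When refining to a finer level, keep the nr_plane planes with the smallest combined cost
        // (data cost plus the four incoming messages) and copy the corresponding messages and
        // disparities down from the coarser level.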
        template <typename T>
        __device__ void get_first_k_element_increase(T* u_new, T* d_new, T* l_new, T* r_new,
                                                     const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur,
                                                     T* data_cost_selected, T* disparity_selected_new, T* data_cost_new,
                                                     const T* data_cost_cur, const T* disparity_selected_cur,
                                                     int nr_plane, int nr_plane2)
        {
            for(int i = 0; i < nr_plane; i++)
            {
                T minimum = numeric_limits<T>::max();
                int id = 0;
                for(int j = 0; j < nr_plane2; j++)
                {
                    T cur = data_cost_new[j * cdisp_step1];
                    if(cur < minimum)
                    {
                        minimum = cur;
                        id = j;
                    }
                }

                data_cost_selected[i * cdisp_step1] = data_cost_cur[id * cdisp_step1];
                disparity_selected_new[i * cdisp_step1] = disparity_selected_cur[id * cdisp_step2];

                u_new[i * cdisp_step1] = u_cur[id * cdisp_step2];
                d_new[i * cdisp_step1] = d_cur[id * cdisp_step2];
                l_new[i * cdisp_step1] = l_cur[id * cdisp_step2];
                r_new[i * cdisp_step1] = r_cur[id * cdisp_step2];

                data_cost_new[id * cdisp_step1] = numeric_limits<T>::max();
            }
        }

        template <typename T>
        __global__ void init_message(T* u_new_, T* d_new_, T* l_new_, T* r_new_,
                                     const T* u_cur_, const T* d_cur_, const T* l_cur_, const T* r_cur_,
                                     T* selected_disp_pyr_new, const T* selected_disp_pyr_cur,
                                     T* data_cost_selected_, const T* data_cost_,
                                     int h, int w, int nr_plane, int h2, int w2, int nr_plane2)
        {
            int x = blockIdx.x * blockDim.x + threadIdx.x;
            int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (y < h && x < w)
            {
                const T* u_cur = u_cur_ + ::min(h2-1, y/2 + 1) * cmsg_step + x/2;
                const T* d_cur = d_cur_ + ::max(0, y/2 - 1)    * cmsg_step + x/2;
                const T* l_cur = l_cur_ + (y/2)                * cmsg_step + ::min(w2-1, x/2 + 1);
                const T* r_cur = r_cur_ + (y/2)                * cmsg_step + ::max(0, x/2 - 1);

                T* data_cost_new = (T*)ctemp + y * cmsg_step + x;

                const T* disparity_selected_cur = selected_disp_pyr_cur + y/2 * cmsg_step + x/2;
                const T* data_cost = data_cost_ + y * cmsg_step + x;

                for(int d = 0; d < nr_plane2; d++)
                {
                    int idx2 = d * cdisp_step2;

                    T val  = data_cost[d * cdisp_step1] + u_cur[idx2] + d_cur[idx2] + l_cur[idx2] + r_cur[idx2];
                    data_cost_new[d * cdisp_step1] = val;
                }

                T* data_cost_selected = data_cost_selected_ + y * cmsg_step + x;
                T* disparity_selected_new = selected_disp_pyr_new + y * cmsg_step + x;

                T* u_new = u_new_ + y * cmsg_step + x;
                T* d_new = d_new_ + y * cmsg_step + x;
                T* l_new = l_new_ + y * cmsg_step + x;
                T* r_new = r_new_ + y * cmsg_step + x;

                u_cur = u_cur_ + y/2 * cmsg_step + x/2;
                d_cur = d_cur_ + y/2 * cmsg_step + x/2;
                l_cur = l_cur_ + y/2 * cmsg_step + x/2;
                r_cur = r_cur_ + y/2 * cmsg_step + x/2;

                get_first_k_element_increase(u_new, d_new, l_new, r_new, u_cur, d_cur, l_cur, r_cur,
                                             data_cost_selected, disparity_selected_new, data_cost_new,
                                             data_cost, disparity_selected_cur, nr_plane, nr_plane2);
            }
        }


        template<class T>
        void init_message(T* u_new, T* d_new, T* l_new, T* r_new,
                          const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur,
                          T* selected_disp_pyr_new, const T* selected_disp_pyr_cur,
                          T* data_cost_selected, const T* data_cost, size_t msg_step,
                          int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream)
        {

            size_t disp_step1 = msg_step * h;
            size_t disp_step2 = msg_step * h2;
            cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step1, sizeof(size_t)) );
            cudaSafeCall( cudaMemcpyToSymbol(cdisp_step2, &disp_step2, sizeof(size_t)) );
            cudaSafeCall( cudaMemcpyToSymbol(cmsg_step,   &msg_step, sizeof(size_t)) );

            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(w, threads.x);
            grid.y = divUp(h, threads.y);

            init_message<<<grid, threads, 0, stream>>>(u_new, d_new, l_new, r_new,
                                                       u_cur, d_cur, l_cur, r_cur,
                                                       selected_disp_pyr_new, selected_disp_pyr_cur,
                                                       data_cost_selected, data_cost,
                                                       h, w, nr_plane, h2, w2, nr_plane2);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }


        template void init_message(short* u_new, short* d_new, short* l_new, short* r_new,
                          const short* u_cur, const short* d_cur, const short* l_cur, const short* r_cur,
                          short* selected_disp_pyr_new, const short* selected_disp_pyr_cur,
                          short* data_cost_selected, const short* data_cost, size_t msg_step,
                          int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream);

        template void init_message(float* u_new, float* d_new, float* l_new, float* r_new,
                          const float* u_cur, const float* d_cur, const float* l_cur, const float* r_cur,
                          float* selected_disp_pyr_new, const float* selected_disp_pyr_cur,
                          float* data_cost_selected, const float* data_cost, size_t msg_step,
                          int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream);

        ///////////////////////////////////////////////////////////////
        ////////////////////  calc all iterations /////////////////////
        ///////////////////////////////////////////////////////////////

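        // One belief-propagation message update: for every plane, sum the data cost and the three
        // other incoming messages, apply the truncated linear discontinuity penalty between the two
        // pixels' selected disparities, and normalise by subtracting the mean.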
        template <typename T>
        __device__ void message_per_pixel(const T* data, T* msg_dst, const T* msg1, const T* msg2, const T* msg3,
                                          const T* dst_disp, const T* src_disp, int nr_plane, volatile T* temp)
        {
            T minimum = numeric_limits<T>::max();

            for(int d = 0; d < nr_plane; d++)
            {
                int idx = d * cdisp_step1;
                T val  = data[idx] + msg1[idx] + msg2[idx] + msg3[idx];

                if(val < minimum)
                    minimum = val;

                msg_dst[idx] = val;
            }

            float sum = 0;
            for(int d = 0; d < nr_plane; d++)
            {
                float cost_min = minimum + cmax_disc_term;
                T src_disp_reg = src_disp[d * cdisp_step1];

                for(int d2 = 0; d2 < nr_plane; d2++)
                    cost_min = fmin(cost_min, msg_dst[d2 * cdisp_step1] + cdisc_single_jump * ::abs(dst_disp[d2 * cdisp_step1] - src_disp_reg));

                temp[d * cdisp_step1] = saturate_cast<T>(cost_min);
                sum += cost_min;
            }
            sum /= nr_plane;

            for(int d = 0; d < nr_plane; d++)
                msg_dst[d * cdisp_step1] = saturate_cast<T>(temp[d * cdisp_step1] - sum);
        }

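        // Red-black (checkerboard) update schedule: each launch updates every other pixel, with the
        // parity chosen by i, alternating between the two halves on successive iterations.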
        template <typename T>
        __global__ void compute_message(T* u_, T* d_, T* l_, T* r_, const T* data_cost_selected, const T* selected_disp_pyr_cur, int h, int w, int nr_plane, int i)
        {
            int y = blockIdx.y * blockDim.y + threadIdx.y;
            int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + i) & 1);

            if (y > 0 && y < h - 1 && x > 0 && x < w - 1)
            {
                const T* data = data_cost_selected + y * cmsg_step + x;

                T* u = u_ + y * cmsg_step + x;
                T* d = d_ + y * cmsg_step + x;
                T* l = l_ + y * cmsg_step + x;
                T* r = r_ + y * cmsg_step + x;

                const T* disp = selected_disp_pyr_cur + y * cmsg_step + x;

                T* temp = (T*)ctemp + y * cmsg_step + x;

                message_per_pixel(data, u, r - 1, u + cmsg_step, l + 1, disp, disp - cmsg_step, nr_plane, temp);
                message_per_pixel(data, d, d - cmsg_step, r - 1, l + 1, disp, disp + cmsg_step, nr_plane, temp);
                message_per_pixel(data, l, u + cmsg_step, d - cmsg_step, l + 1, disp, disp - 1, nr_plane, temp);
                message_per_pixel(data, r, u + cmsg_step, d - cmsg_step, r - 1, disp, disp + 1, nr_plane, temp);
            }
        }


        template<class T>
        void calc_all_iterations(T* u, T* d, T* l, T* r, const T* data_cost_selected,
            const T* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, cudaStream_t stream)
        {
            size_t disp_step = msg_step * h;
            cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step, sizeof(size_t)) );
            cudaSafeCall( cudaMemcpyToSymbol(cmsg_step,  &msg_step,  sizeof(size_t)) );

            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(w, threads.x << 1);
            grid.y = divUp(h, threads.y);

            for(int t = 0; t < iters; ++t)
            {
                compute_message<<<grid, threads, 0, stream>>>(u, d, l, r, data_cost_selected, selected_disp_pyr_cur, h, w, nr_plane, t & 1);
                cudaSafeCall( cudaGetLastError() );
            }
            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        template void calc_all_iterations(short* u, short* d, short* l, short* r, const short* data_cost_selected, const short* selected_disp_pyr_cur, size_t msg_step,
            int h, int w, int nr_plane, int iters, cudaStream_t stream);

        template void calc_all_iterations(float* u, float* d, float* l, float* r, const float* data_cost_selected, const float* selected_disp_pyr_cur, size_t msg_step,
            int h, int w, int nr_plane, int iters, cudaStream_t stream);


        ///////////////////////////////////////////////////////////////
        /////////////////////////// output ////////////////////////////
        ///////////////////////////////////////////////////////////////


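        // Winner-take-all readout: for each pixel, output the selected disparity plane with the
        // smallest total belief (data cost plus the four incoming messages).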
        template <typename T>
        __global__ void compute_disp(const T* u_, const T* d_, const T* l_, const T* r_,
                                     const T* data_cost_selected, const T* disp_selected_pyr,
                                     PtrStepSz<short> disp, int nr_plane)
        {
            int x = blockIdx.x * blockDim.x + threadIdx.x;
            int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (y > 0 && y < disp.rows - 1 && x > 0 && x < disp.cols - 1)
            {
                const T* data = data_cost_selected + y * cmsg_step + x;
                const T* disp_selected = disp_selected_pyr + y * cmsg_step + x;

                const T* u = u_ + (y+1) * cmsg_step + (x+0);
                const T* d = d_ + (y-1) * cmsg_step + (x+0);
                const T* l = l_ + (y+0) * cmsg_step + (x+1);
                const T* r = r_ + (y+0) * cmsg_step + (x-1);

                int best = 0;
                T best_val = numeric_limits<T>::max();
                for (int i = 0; i < nr_plane; ++i)
                {
                    int idx = i * cdisp_step1;
                    T val = data[idx]+ u[idx] + d[idx] + l[idx] + r[idx];

                    if (val < best_val)
                    {
                        best_val = val;
                        best = saturate_cast<short>(disp_selected[idx]);
                    }
                }
                disp(y, x) = best;
            }
        }

        template<class T>
        void compute_disp(const T* u, const T* d, const T* l, const T* r, const T* data_cost_selected, const T* disp_selected, size_t msg_step,
            const PtrStepSz<short>& disp, int nr_plane, cudaStream_t stream)
        {
            size_t disp_step = disp.rows * msg_step;
            cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step, sizeof(size_t)) );
            cudaSafeCall( cudaMemcpyToSymbol(cmsg_step,  &msg_step,  sizeof(size_t)) );

            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(disp.cols, threads.x);
            grid.y = divUp(disp.rows, threads.y);

            compute_disp<<<grid, threads, 0, stream>>>(u, d, l, r, data_cost_selected, disp_selected, disp, nr_plane);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        template void compute_disp(const short* u, const short* d, const short* l, const short* r, const short* data_cost_selected, const short* disp_selected, size_t msg_step,
            const PtrStepSz<short>& disp, int nr_plane, cudaStream_t stream);

        template void compute_disp(const float* u, const float* d, const float* l, const float* r, const float* data_cost_selected, const float* disp_selected, size_t msg_step,
            const PtrStepSz<short>& disp, int nr_plane, cudaStream_t stream);
    } // namespace stereocsbp
}}} // namespace cv { namespace gpu { namespace device {

#endif /* CUDA_DISABLER */