datamov_utils.hpp
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __OPENCV_GPU_DATAMOV_UTILS_HPP__
#define __OPENCV_GPU_DATAMOV_UTILS_HPP__

#include "common.hpp"

namespace cv { namespace gpu { namespace device
{
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 200

        // On Fermi (sm_20) and newer, the memory space of a pointer is detected
        // automatically, so a plain load is sufficient.
        template <typename T> struct ForceGlob
        {
            __device__ __forceinline__ static void Load(const T* ptr, int offset, T& val)  { val = ptr[offset];  }
        };
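
        // Usage sketch (illustrative only; this hypothetical kernel is not part of
        // the original header): routing reads through ForceGlob keeps a single code
        // path that compiles to a plain load here and to an explicit ld.global on
        // pre-Fermi devices (see the inline-PTX specializations in the #else branch
        // below), e.g.
        //
        //     template <typename T>
        //     __global__ void copy_kernel(const T* src, T* dst, int n)
        //     {
        //         const int i = blockIdx.x * blockDim.x + threadIdx.x;
        //         if (i < n)
        //         {
        //             T val;
        //             ForceGlob<T>::Load(src, i, val); // equivalent to val = src[i] here
        //             dst[i] = val;
        //         }
        //     }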

    #else // __CUDA_ARCH__ >= 200

        #if defined(_WIN64) || defined(__LP64__)
            // 64-bit register modifier for inlined asm
            #define OPENCV_GPU_ASM_PTR "l"
        #else
            // 32-bit register modifier for inlined asm
            #define OPENCV_GPU_ASM_PTR "r"
        #endif

        template<class T> struct ForceGlob;
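
        // Pre-Fermi path: the specializations defined below force a global-memory
        // load by emitting ld.global directly via inline PTX assembly.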

        #define OPENCV_GPU_DEFINE_FORCE_GLOB(base_type, ptx_type, reg_mod) \
            template <> struct ForceGlob<base_type> \
            { \
                __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \
                { \
                    asm("ld.global."#ptx_type" %0, [%1];" : "="#reg_mod(val) : OPENCV_GPU_ASM_PTR(ptr + offset)); \
                } \
            };

        #define OPENCV_GPU_DEFINE_FORCE_GLOB_B(base_type, ptx_type) \
            template <> struct ForceGlob<base_type> \
            { \
                __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \
                { \
                    asm("ld.global."#ptx_type" %0, [%1];" : "=r"(*reinterpret_cast<uint*>(&val)) : OPENCV_GPU_ASM_PTR(ptr + offset)); \
                } \
            };

            OPENCV_GPU_DEFINE_FORCE_GLOB_B(uchar,  u8)
            OPENCV_GPU_DEFINE_FORCE_GLOB_B(schar,  s8)
            OPENCV_GPU_DEFINE_FORCE_GLOB_B(char,   b8)
            OPENCV_GPU_DEFINE_FORCE_GLOB  (ushort, u16, h)
            OPENCV_GPU_DEFINE_FORCE_GLOB  (short,  s16, h)
            OPENCV_GPU_DEFINE_FORCE_GLOB  (uint,   u32, r)
            OPENCV_GPU_DEFINE_FORCE_GLOB  (int,    s32, r)
            OPENCV_GPU_DEFINE_FORCE_GLOB  (float,  f32, f)
            OPENCV_GPU_DEFINE_FORCE_GLOB  (double, f64, d)
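
            // For reference, a rough expansion of one instantiation (keeping
            // OPENCV_GPU_ASM_PTR symbolic; it is "l" on 64-bit and "r" on 32-bit
            // builds, as defined above): OPENCV_GPU_DEFINE_FORCE_GLOB(float, f32, f)
            // defines
            //
            //     template <> struct ForceGlob<float>
            //     {
            //         __device__ __forceinline__ static void Load(const float* ptr, int offset, float& val)
            //         {
            //             asm("ld.global.f32 %0, [%1];" : "=f"(val) : "l"(ptr + offset)); // 64-bit case
            //         }
            //     };
            //
            // i.e. the value is read with an explicit ld.global PTX instruction, so
            // the compiler cannot turn the access into anything but a global load.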

        #undef OPENCV_GPU_DEFINE_FORCE_GLOB
        #undef OPENCV_GPU_DEFINE_FORCE_GLOB_B
        #undef OPENCV_GPU_ASM_PTR

    #endif // __CUDA_ARCH__ >= 200
}}} // namespace cv { namespace gpu { namespace device

#endif // __OPENCV_GPU_DATAMOV_UTILS_HPP__