X-Received: by 10.182.4.1 with SMTP id g1mr22587obg.3.1405667748885; Fri, 18 Jul 2014
00:15:48 -0700 (PDT)
Path: news-archive.icm.edu.pl!news.icm.edu.pl!plix.pl!newsfeed2.plix.pl!goblin1!gobli
n.stu.neva.ru!news.glorb.com!h18no3442339igc.0!news-out.google.com!gf2ni867igb.
0!nntp.google.com!h18no3442336igc.0!postnews.google.com!glegroupsg2000goo.googl
egroups.com!not-for-mail
Newsgroups: pl.comp.programming
Date: Fri, 18 Jul 2014 00:15:48 -0700 (PDT)
In-Reply-To: <lqaeff$l9l$1@node1.news.atman.pl>
Complaints-To: g...@g...com
Injection-Info: glegroupsg2000goo.googlegroups.com; posting-host=78.30.95.71;
posting-account=Sb6m8goAAABbWsBL7gouk3bfLsuxwMgN
NNTP-Posting-Host: 78.30.95.71
References: <lqaeff$l9l$1@node1.news.atman.pl>
User-Agent: G2/1.0
MIME-Version: 1.0
Message-ID: <2...@g...com>
Subject: Re: CUDA a OpenCL
From: firr <p...@g...com>
Injection-Date: Fri, 18 Jul 2014 07:15:48 +0000
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: quoted-printable
Xref: news-archive.icm.edu.pl pl.comp.programming:206357
On Friday, 18 July 2014 08:22:33 UTC+2, Borneq wrote:
> Maybe it's worth looking into OpenCL? CUDA works only with NVidia cards,
> while OpenCL is more universal.
> That would suggest CUDA is closer to the hardware, yet for some people
> OpenCL actually ran even faster.
> The other thing - what do you write it in? CUDA is apparently supported
> by some newer Visual C++ - 2012 or even later.
> And OpenCL?
It is worth looking into. I played with OpenCL about half a year ago, but I only ran a
simple test. You simply compile the program against the opencl.dll library, although the
startup "knot" (that tangle of setup API calls) was terribly tedious for me - people used
to complain about how setting up a window looks in WinAPI, but these OpenGL or OpenCL
setups are even worse.
I can paste my notes from getting the OpenCL examples running - copied from some
tutorial, but I remember I struggled for a couple of days to get it to work, so these
notes may be useful. "clew.c" is a small library downloaded from the net.
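
(For context: clew is just a thin loader - it opens opencl.dll at run time with
LoadLibrary and pulls the cl* entry points out with GetProcAddress, so no OpenCL import
library is needed at build time. Below is a rough, Windows-only sketch of that idea; the
minimal typedefs, names and error handling are mine, not copied from clew.c.)

// sketch of run-time loading of OpenCL on Windows - the thing clew automates;
// minimal typedefs stand in for the real OpenCL headers (illustrative only)
#include <windows.h>
#include <stdio.h>

typedef int                     cl_int;
typedef unsigned int            cl_uint;
typedef struct _cl_platform_id* cl_platform_id;

typedef cl_int (__stdcall *pfn_clGetPlatformIDs)(cl_uint, cl_platform_id*, cl_uint*);

int main(void)
{
    HMODULE lib = LoadLibraryA("OpenCL.dll");   // ICD loader installed with the GPU driver
    if (!lib) { printf("OpenCL.dll not found\n"); return 1; }

    pfn_clGetPlatformIDs pGetPlatformIDs =
        (pfn_clGetPlatformIDs) GetProcAddress(lib, "clGetPlatformIDs");
    if (!pGetPlatformIDs) { printf("clGetPlatformIDs not exported\n"); return 1; }

    cl_uint n = 0;
    pGetPlatformIDs(0, NULL, &n);               // just ask how many platforms there are
    printf("%u OpenCL platform(s) found\n", n);

    FreeLibrary(lib);
    return 0;
}

And here are the notes themselves: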
////////////////////////////////////////////////////////////////////////////////
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
//#include <OpenCL/opencl.h>
#include "clew.c"
////////////////////////////////////////////////////////////////////////////////
// Use a static data size for simplicity
//
#define DATA_SIZE (1024)
////////////////////////////////////////////////////////////////////////////////
// Simple compute kernel which computes the square of an input array
//
const char *KernelSource = "\n" \
"__kernel void square( \n" \
" __global float* input, \n" \
" __global float* output, \n" \
" const unsigned int count) \n" \
"{ \n" \
" int i = get_global_id(0); \n" \
" if(i < count) \n" \
" output[i] = input[i] * input[i]; \n" \
"} \n" \
"\n";
////////////////////////////////////////////////////////////////////////////////
int writeCLInfo(void);              // prints basic info about all OpenCL platforms/devices

int main(int argc, char** argv)
{
    // load OpenCL.dll at run time and resolve the API entry points via clew
    if (clewInit("OpenCL.dll") != 0)
    {
        printf("Error: Failed to load OpenCL.dll!\n");
        return EXIT_FAILURE;
    }
    writeCLInfo();

    int err;                        // error code returned from api calls

    float data[DATA_SIZE];          // original data set given to device
    float results[DATA_SIZE];       // results returned from device
    unsigned int correct;           // number of correct results returned

    size_t global;                  // global domain size for our calculation
    size_t local;                   // local domain size for our calculation

    cl_device_id device_id;         // compute device id
    cl_context context;             // compute context
    cl_command_queue commands;      // compute command queue
    cl_program program;             // compute program
    cl_kernel kernel;               // compute kernel

    cl_mem input;                   // device memory used for the input array
    cl_mem output;                  // device memory used for the output array

    // Fill our data set with random float values
    //
    unsigned int i = 0;
    unsigned int count = DATA_SIZE;
    for(i = 0; i < count; i++)
        data[i] = rand() / (float)RAND_MAX;

    //////// platform
    static cl_platform_id platform_id[10] = {0};
    cl_uint no_of_platforms_found = 0;
    cl_int ret = clGetPlatformIDs(10, platform_id, &no_of_platforms_found);
    if (ret == CL_SUCCESS) printf("clGetPlatformIDs success");
    printf(" %u platforms found\n", no_of_platforms_found);

    // Connect to a compute device
    //
    int gpu = 1;
    err = clGetDeviceIDs(platform_id[0], gpu ? CL_DEVICE_TYPE_GPU : CL_DEVICE_TYPE_CPU,
                         1, &device_id, NULL);
    if (err != CL_SUCCESS)
    {
        printf("Error: Failed to create a device group!\n");
        return EXIT_FAILURE;
    }
    // Create a compute context
    //
    context = clCreateContext(0, 1, &device_id, NULL, NULL, &err);
    if (!context)
    {
        printf("Error: Failed to create a compute context!\n");
        return EXIT_FAILURE;
    }

    // Create a command queue
    //
    commands = clCreateCommandQueue(context, device_id, 0, &err);
    if (!commands)
    {
        printf("Error: Failed to create a command queue!\n");
        return EXIT_FAILURE;
    }

    // Create the compute program from the source buffer
    //
    program = clCreateProgramWithSource(context, 1, (const char **) &KernelSource, NULL, &err);
    if (!program)
    {
        printf("Error: Failed to create compute program!\n");
        return EXIT_FAILURE;
    }

    // Build the program executable
    //
    err = clBuildProgram(program, 0, NULL, NULL, NULL, NULL);
    if (err != CL_SUCCESS)
    {
        size_t len;
        char buffer[2048];

        printf("Error: Failed to build program executable!\n");
        clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG,
                              sizeof(buffer), buffer, &len);
        printf("%s\n", buffer);
        exit(1);
    }

    // Create the compute kernel in the program we wish to run
    //
    kernel = clCreateKernel(program, "square", &err);
    if (!kernel || err != CL_SUCCESS)
    {
        printf("Error: Failed to create compute kernel!\n");
        exit(1);
    }
    // Create the input and output arrays in device memory for our calculation
    //
    input  = clCreateBuffer(context, CL_MEM_READ_ONLY,  sizeof(float) * count, NULL, NULL);
    output = clCreateBuffer(context, CL_MEM_WRITE_ONLY, sizeof(float) * count, NULL, NULL);
    if (!input || !output)
    {
        printf("Error: Failed to allocate device memory!\n");
        exit(1);
    }

    // Write our data set into the input array in device memory
    //
    err = clEnqueueWriteBuffer(commands, input, CL_TRUE, 0, sizeof(float) * count,
                               data, 0, NULL, NULL);
    if (err != CL_SUCCESS)
    {
        printf("Error: Failed to write to source array!\n");
        exit(1);
    }

    // Set the arguments to our compute kernel
    //
    err = 0;
    err  = clSetKernelArg(kernel, 0, sizeof(cl_mem), &input);
    err |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &output);
    err |= clSetKernelArg(kernel, 2, sizeof(unsigned int), &count);
    if (err != CL_SUCCESS)
    {
        printf("Error: Failed to set kernel arguments! %d\n", err);
        exit(1);
    }

    // Get the maximum work group size for executing the kernel on the device
    //
    err = clGetKernelWorkGroupInfo(kernel, device_id, CL_KERNEL_WORK_GROUP_SIZE,
                                   sizeof(local), &local, NULL);
    if (err != CL_SUCCESS)
    {
        printf("Error: Failed to retrieve kernel work group info! %d\n", err);
        exit(1);
    }
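
    // note: with the local size taken straight from CL_KERNEL_WORK_GROUP_SIZE,
    // OpenCL 1.x requires the global size to be an exact multiple of it, otherwise
    // clEnqueueNDRangeKernel below fails with CL_INVALID_WORK_GROUP_SIZE
    // (DATA_SIZE = 1024 happens to be a multiple of the typical values)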
    // Execute the kernel over the entire range of our 1d input data set
    // using the maximum number of work group items for this device
    //
    global = count;
    err = clEnqueueNDRangeKernel(commands, kernel, 1, NULL, &global, &local, 0, NULL, NULL);
    if (err)
    {
        printf("Error: Failed to execute kernel!\n");
        return EXIT_FAILURE;
    }

    // Wait for the command queue to get serviced before reading back results
    //
    clFinish(commands);

    // Read back the results from the device to verify the output
    //
    err = clEnqueueReadBuffer(commands, output, CL_TRUE, 0, sizeof(float) * count,
                              results, 0, NULL, NULL);
    if (err != CL_SUCCESS)
    {
        printf("Error: Failed to read output array! %d\n", err);
        exit(1);
    }

    // Validate our results
    //
    correct = 0;
    for(i = 0; i < count; i++)
    {
        if (fabs(results[i] - data[i] * data[i]) < 0.001f)
            correct++;
    }

    // Dump all values (verbose)
    for(i = 0; i < count; i++)
    {
        printf("data %f square %f \n", data[i], data[i] * data[i]);
        printf("result %f \n", results[i]);
    }

    // Print a brief summary detailing the results
    //
    printf("Computed '%u/%u' correct values!\n", correct, count);

    // Shutdown and cleanup
    //
    clReleaseMemObject(input);
    clReleaseMemObject(output);
    clReleaseProgram(program);
    clReleaseKernel(kernel);
    clReleaseCommandQueue(commands);
    clReleaseContext(context);

    return 0;
}
///////////////////
int writeCLInfo(void)
{
    cl_uint i, j;
    char* value;
    size_t valueSize;
    cl_uint platformCount;
    cl_platform_id* platforms;
    cl_uint deviceCount;
    cl_device_id* devices;
    cl_uint maxComputeUnits;

    // get all platforms
    clGetPlatformIDs(0, NULL, &platformCount);
    platforms = (cl_platform_id*) malloc(sizeof(cl_platform_id) * platformCount);
    clGetPlatformIDs(platformCount, platforms, NULL);

    for (i = 0; i < platformCount; i++) {
        // get all devices
        clGetDeviceIDs(platforms[i], CL_DEVICE_TYPE_ALL, 0, NULL, &deviceCount);
        devices = (cl_device_id*) malloc(sizeof(cl_device_id) * deviceCount);
        clGetDeviceIDs(platforms[i], CL_DEVICE_TYPE_ALL, deviceCount, devices, NULL);

        // for each device print critical attributes
        for (j = 0; j < deviceCount; j++) {
            // print device name
            clGetDeviceInfo(devices[j], CL_DEVICE_NAME, 0, NULL, &valueSize);
            value = (char*) malloc(valueSize);
            clGetDeviceInfo(devices[j], CL_DEVICE_NAME, valueSize, value, NULL);
            printf("%u. Device: %s\n", j+1, value);
            free(value);

            // print hardware device version
            clGetDeviceInfo(devices[j], CL_DEVICE_VERSION, 0, NULL, &valueSize);
            value = (char*) malloc(valueSize);
            clGetDeviceInfo(devices[j], CL_DEVICE_VERSION, valueSize, value, NULL);
            printf(" %u.%d Hardware version: %s\n", j+1, 1, value);
            free(value);

            // print software driver version
            clGetDeviceInfo(devices[j], CL_DRIVER_VERSION, 0, NULL, &valueSize);
            value = (char*) malloc(valueSize);
            clGetDeviceInfo(devices[j], CL_DRIVER_VERSION, valueSize, value, NULL);
            printf(" %u.%d Software version: %s\n", j+1, 2, value);
            free(value);

            // print c version supported by compiler for device
            clGetDeviceInfo(devices[j], CL_DEVICE_OPENCL_C_VERSION, 0, NULL, &valueSize);
            value = (char*) malloc(valueSize);
            clGetDeviceInfo(devices[j], CL_DEVICE_OPENCL_C_VERSION, valueSize, value, NULL);
            printf(" %u.%d OpenCL C version: %s\n", j+1, 3, value);
            free(value);

            // print parallel compute units
            clGetDeviceInfo(devices[j], CL_DEVICE_MAX_COMPUTE_UNITS,
                            sizeof(maxComputeUnits), &maxComputeUnits, NULL);
            printf(" %u.%d Parallel compute units: %u\n", j+1, 4, maxComputeUnits);
        }

        free(devices);
    }

    free(platforms);
    return 0;
}
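
To build this: because clew loads opencl.dll at run time, no OpenCL import library is
needed at link time. Assuming the listing is saved as ocl_square.c with clew.c and
clew.h next to it (the file names here are my own, not from the tutorial), something
along the lines of

gcc -O2 -o ocl_square ocl_square.c

with MinGW gcc should be enough; only the opencl.dll shipped with the graphics driver
is needed to actually run it.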