<td style="width:10%" class="tablecell_title">Time (s)</td>
</tr>
<tr>
- <td style="width:30%" class="tablecell_success">2</td>
+ <td style="width:30%" class="tablecell_success">6</td>
<td style="width:30%" class="tablecell_success">0</td>
<td style="width:30%" class="tablecell_success">100%</td>
- <td style="width:10%" class="tablecell_success">0.000000</td>
+ <td style="width:10%" class="tablecell_success">3.000000</td>
</tr>
</table>
<hr />
<td style="width:10%" class="tablecell_success">100%</td>
<td style="width:10%" class="tablecell_success">0.000000</td>
</tr>
+ <tr>
+ <td class="tablecell_success"><a href="#TestSaucisson">TestSaucisson</a></td>
+ <td style="width:10%" class="tablecell_success">1</td>
+ <td style="width:10%" class="tablecell_success">0</td>
+ <td style="width:10%" class="tablecell_success">100%</td>
+ <td style="width:10%" class="tablecell_success">0.000000</td>
+ </tr>
+ <tr>
+ <td class="tablecell_success"><a href="#TestProduitScalaire">TestProduitScalaire</a></td>
+ <td style="width:10%" class="tablecell_success">1</td>
+ <td style="width:10%" class="tablecell_success">0</td>
+ <td style="width:10%" class="tablecell_success">100%</td>
+ <td style="width:10%" class="tablecell_success">1.000000</td>
+ </tr>
+ <tr>
+ <td class="tablecell_success"><a href="#TestHistogramme">TestHistogramme</a></td>
+ <td style="width:10%" class="tablecell_success">1</td>
+ <td style="width:10%" class="tablecell_success">0</td>
+ <td style="width:10%" class="tablecell_success">100%</td>
+ <td style="width:10%" class="tablecell_success">2.000000</td>
+ </tr>
+ <tr>
+ <td class="tablecell_success"><a href="#TestMonteCarlo">TestMonteCarlo</a></td>
+ <td style="width:10%" class="tablecell_success">1</td>
+ <td style="width:10%" class="tablecell_success">0</td>
+ <td style="width:10%" class="tablecell_success">100%</td>
+ <td style="width:10%" class="tablecell_success">0.000000</td>
+ </tr>
</table>
<hr />
</table>
<p class="spaced"><a href="#top">Back to top</a>
</p>
+<h3><a name="TestSaucisson"></a>Suite: TestSaucisson</h3>
+<table summary="Details for suite TestSaucisson" class="table_suite">
+ <tr>
+ <td class="tablecell_title">Name</td>
+ <td style="width:10%" class="tablecell_title">Errors</td>
+ <td style="width:10%" class="tablecell_title">Success</td>
+ <td style="width:10%" class="tablecell_title">Time (s)</td>
+ </tr>
+ <tr>
+ <td class="tablecell_success">testSaucissonCuda</td>
+ <td class="tablecell_success">0</td>
+ <td class="tablecell_success">true</td>
+ <td class="tablecell_success">0.000000</td>
+ </tr>
+</table>
+<p class="spaced"><a href="#top">Back to top</a>
+</p>
+<h3><a name="TestProduitScalaire"></a>Suite: TestProduitScalaire</h3>
+<table summary="Details for suite TestProduitScalaire" class="table_suite">
+ <tr>
+ <td class="tablecell_title">Name</td>
+ <td style="width:10%" class="tablecell_title">Errors</td>
+ <td style="width:10%" class="tablecell_title">Success</td>
+ <td style="width:10%" class="tablecell_title">Time (s)</td>
+ </tr>
+ <tr>
+ <td class="tablecell_success">testProduitScalaire</td>
+ <td class="tablecell_success">0</td>
+ <td class="tablecell_success">true</td>
+ <td class="tablecell_success">1.000000</td>
+ </tr>
+</table>
+<p class="spaced"><a href="#top">Back to top</a>
+</p>
+<h3><a name="TestHistogramme"></a>Suite: TestHistogramme</h3>
+<table summary="Details for suite TestHistogramme" class="table_suite">
+ <tr>
+ <td class="tablecell_title">Name</td>
+ <td style="width:10%" class="tablecell_title">Errors</td>
+ <td style="width:10%" class="tablecell_title">Success</td>
+ <td style="width:10%" class="tablecell_title">Time (s)</td>
+ </tr>
+ <tr>
+ <td class="tablecell_success">testHistogramme</td>
+ <td class="tablecell_success">0</td>
+ <td class="tablecell_success">true</td>
+ <td class="tablecell_success">2.000000</td>
+ </tr>
+</table>
+<p class="spaced"><a href="#top">Back to top</a>
+</p>
+<h3><a name="TestMonteCarlo"></a>Suite: TestMonteCarlo</h3>
+<table summary="Details for suite TestMonteCarlo" class="table_suite">
+ <tr>
+ <td class="tablecell_title">Name</td>
+ <td style="width:10%" class="tablecell_title">Errors</td>
+ <td style="width:10%" class="tablecell_title">Success</td>
+ <td style="width:10%" class="tablecell_title">Time (s)</td>
+ </tr>
+ <tr>
+ <td class="tablecell_success">testMonteCarlo</td>
+ <td class="tablecell_success">0</td>
+ <td class="tablecell_success">true</td>
+ <td class="tablecell_success">0.000000</td>
+ </tr>
+</table>
+<p class="spaced"><a href="#top">Back to top</a>
+</p>
<hr />
{
const float i = s + 1;
const float xi = -1 + i * deltaX;
- threadResult += sqrtf(1-xi*xi);
+ threadResult += sqrtf(1 - xi * xi);
s += NB_THREAD;
}
const int TID_LOCAL = Indice1D::tidLocal();
if (TID_LOCAL == 0)
{
- atomicAdd(ptrResult, float(tabSM[0]));
+ atomicAdd(ptrResult, tabSM[0]);
}
}
bool saucisson()
{
- cout << "saucisson() ..." << endl;
+ cout << endl << "saucisson() ..." << endl;
// Number of samples. Beyond this, the result quality does not improve; doubles would have to be used instead of floats.
const int N = 100000;
// GPU-side allocation in global memory (GM).
- float* ptrDevResult = 0;
+ float* ptrDevResult;
HANDLE_ERROR(cudaMalloc(&ptrDevResult, sizeof(float)));
HANDLE_ERROR(cudaMemset(ptrDevResult, 0, sizeof(float)));
bool produitScalaire()
{
- cout << "produitScalaire() ..." << endl;
+ cout << endl << "produitScalaire() ..." << endl;
const int N = 10000000; // Size of the two vectors: 10 * 10^6.
cout << "Résultat : " << res << endl;
cout << "Résultat théorique : " << resTheo << endl;
cout << "Différence absolue : " << resTheo - res << endl;
- cout << "Différence relatif : " << 100 * (resTheo - res) / (resTheo + res) << " %" << endl;
+ cout << "Différence relative : " << 100 * (resTheo - res) / (resTheo + res) << " %" << endl;
return true;
}
bool histogramme()
{
- cout << "hisogramme() ..." << endl;
+ cout << endl << "histogramme() ..." << endl;
Chronos chronos;
chronos.start();
--- /dev/null
+#include "MonteCarlo.h"
+
+#include <iostream>
+#include <cmath>
+#include <stdio.h>
+#include <climits> // INT_MAX
+using namespace std;
+
+#include <curand_kernel.h>
+
+#include "Indice1D.h"
+#include "cudaTools.h"
+#include "Device.h"
+#include "Lock.h"
+
+// Parameters for computing pi.
+const float X_MIN = 0;
+const float X_MAX = 1;
+const float M = 1;
+
+/*
+ * 1) Each thread computes an intermediate result and then stores it in shared memory.
+ * n: number of samples.
+ */
+__device__
+void mc_reductionIntraThread(int n, uint* tabSM, curandState* tabGeneratorThread)
+ {
+ const int NB_THREAD = Indice1D::nbThread();
+ const int TID = Indice1D::tid();
+ const int TID_LOCAL = Indice1D::tidLocal();
+
+ curandState& localState = tabGeneratorThread[TID];
+
+ uint threadResult = 0;
+ int s = TID;
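+ // Grid-stride loop: this thread handles samples TID, TID + NB_THREAD, TID + 2*NB_THREAD, ...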
+ while (s < n)
+ {
+ // Draw a random point in the unit square between (0,0) and (1,1); see X_MIN, X_MAX and M.
+ const float x = curand_uniform(&localState);
+ const float y = curand_uniform(&localState);
+ const float fx = sqrtf(1 - x * x);
+
+ // If y falls under the curve, count the point as a hit.
+ if (y < fx)
+ threadResult += 1;
+
+ s += NB_THREAD;
+ }
+
+ tabSM[TID_LOCAL] = threadResult;
+ }
+
+/*
+ * Combines the results of 'tabSM' into 'tabSM[0]'.
+ */
+__device__
+void mc_combine(uint* tabSM, int middle)
+ {
+ const int TID_LOCAL = Indice1D::tidLocal();
+ const int NB_THREAD_LOCAL = Indice1D::nbThreadBlock();
+
+ int s = TID_LOCAL;
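+ // Fold the upper half of the active range onto the lower half, striding by the block's thread count.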
+ while (s < middle)
+ {
+ tabSM[s] += tabSM[s + middle];
+ s += NB_THREAD_LOCAL;
+ }
+ }
+
+/*
+ * 2) The shared memory is reduced; the result is stored in 'tabSM[0]'.
+ */
+__device__
+void mc_reductionIntraBlock(uint* tabSM)
+ {
+ const int TAB_SIZE = blockDim.x;
+ int middle = TAB_SIZE / 2;
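+ // Binary tree reduction; assumes blockDim.x is a power of two (256 in this project).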
+
+ while (middle > 0)
+ {
+ mc_combine(tabSM, middle);
+ middle /= 2;
+ __syncthreads(); // Block-level thread synchronization.
+ }
+ }
+
+/*
+ * 3) Each block's 'tabSM[0]' is reduced into 'ptrResult', which lives in global memory.
+ */
+__device__
+void mc_reductionInterBlock(uint* tabSM, uint* ptrResult)
+ {
+ const int TID_LOCAL = Indice1D::tidLocal();
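+ // Only the first thread of each block adds the block's partial count to the global result.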
+ if (TID_LOCAL == 0)
+ {
+ atomicAdd(ptrResult, tabSM[0]);
+ }
+ }
+
+/**
+ * The shared memory size (in number of uint elements) must equal the block size.
+ * n: the number of points to draw at random.
+ * ptrResult: the result of the computation.
+ * tabGeneratorThread: the random generators (one per thread).
+ */
+__global__
+void monteCarlo(int n, uint* ptrResult, curandState* tabGeneratorThread)
+ {
+ extern __shared__ uint tabSM[]; // Dynamic shared memory.
+
+ // 1) Intra-thread reduction.
+ mc_reductionIntraThread(n, tabSM, tabGeneratorThread);
+
+ __syncthreads();
+
+ // 2) Intra-block reduction.
+ mc_reductionIntraBlock(tabSM);
+
+ // 3) Inter-block reduction.
+ mc_reductionInterBlock(tabSM, ptrResult);
+ }
+
+__global__
+void setupKernelRand(curandState* tabGeneratorThread, int deviceId)
+ {
+ int tid = Indice1D::tid();
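+ // Offsets derived from deviceId, so that different GPUs use different seeds and sequence numbers.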
+ int deltaSeed = deviceId * INT_MAX;
+ int deltaSequence = deviceId * 100;
+ int deltaOffset = deviceId * 100;
+
+ int seed = 1234 + deltaSeed;
+ int sequenceNumber = tid + deltaSequence;
+ int offset = deltaOffset;
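+ // Same seed but a distinct sequence number per thread, so every thread gets an independent random stream.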
+ curand_init(seed, sequenceNumber, offset, &tabGeneratorThread[tid]);
+ }
+
+bool monteCarlo()
+ {
+ cout << endl << "monteCarlo() ..." << endl;
+
+ // Total number of randomly drawn points.
+ const int N = 1000000;
+
+ // GPU-side allocation in global memory (GM).
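+ // ptrDevResult is the global hit counter: it must start at zero because every block atomicAdds its partial count into it.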
+ uint* ptrDevResult;
+ HANDLE_ERROR(cudaMalloc(&ptrDevResult, sizeof(uint)));
+ HANDLE_ERROR(cudaMemset(ptrDevResult, 0, sizeof(uint)));
+
+ // Launch parameters for the kernel call on the device.
+ const dim3 dg(256, 1, 1);
+ const dim3 db(256, 1, 1);
+ Device::assertDim(dg, db);
+ const size_t SMSize = db.x * sizeof(uint); // 256 uint;
+
+ // Random-number generators (one per thread).
+ curandState* tabGeneratorThread;
+ HANDLE_ERROR(cudaMalloc(&tabGeneratorThread, db.x * dg.x * sizeof(curandState)));
+ setupKernelRand<<<dg, db>>>(tabGeneratorThread, 0); // Device 0 (to adapt when running in multi-GPU (MP) mode).
+ cudaDeviceSynchronize();
+
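+ // The third launch parameter is the dynamic shared memory size in bytes, backing 'extern __shared__ uint tabSM[]'.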
+ monteCarlo<<<dg, db, SMSize>>>(N, ptrDevResult, tabGeneratorThread);
+
+ // cudaDeviceSynchronize(); // Used to flush device-side prints to stdout (debug).
+
+ uint nb;
+ // Implicit synchronization barrier ('cudaMemcpy').
+ HANDLE_ERROR(cudaMemcpy(&nb, ptrDevResult, sizeof(uint), cudaMemcpyDeviceToHost));
+
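+ // nb/N estimates the area under f(x) = sqrt(1 - x*x) on [0,1], i.e. a quarter circle of area pi/4, hence pi = 4 * integrale.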
+ double integrale = (double)nb / (double)N * (X_MAX - X_MIN) * M;
+ double pi = 4 * integrale;
+
+ cout.precision(20);
+ cout << "Approximation de PI : " << pi << endl;
+
+ return true;
+ }
--- /dev/null
+#ifndef MONTE_CARLO_H
+#define MONTE_CARLO_H
+
+bool monteCarlo();
+
+#endif
#include <iostream>
#include <stdlib.h>
+#include <algorithm>
+#include <string>
+#include <vector>
using namespace std;
#include "01c_Saucisson/Saucisson.h"
#include "02_ProduitScalaire/ProduitScalaire.h"
#include "03_Histogramme/Histogramme.h"
+#include "04_MonteCarlo/MonteCarlo.h"
extern bool useHello();
extern bool addVectors();
-int mainCore()
+int mainCore(const vector<string>& args)
{
bool isOk = true;
+
+ // Simple examples.
//isOk &= useHello();
//isOk &= addVectors();
- //isOk &= produitScalaire();
- //isOk &= saucisson();
- //isOk &= histogramme();
+
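+ // Each computation runs only if its name is passed on the command line; several names can be combined.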
+ if (find(args.begin(), args.end(), "saucisson") != args.end())
+ isOk &= saucisson();
+
+ if (find(args.begin(), args.end(), "produit-scalaire") != args.end())
+ isOk &= produitScalaire();
+
+ if (find(args.begin(), args.end(), "histogramme") != args.end())
+ isOk &= histogramme();
+
+ if (find(args.begin(), args.end(), "monte-carlo") != args.end())
+ isOk &= monteCarlo();
cout << "\nisOK = " << isOk << endl;
cout << "\nEnd : mainCore" << endl;
#include <iostream>
+#include <vector>
+#include <string>
#include <stdlib.h>
#include <assert.h>
+using namespace std;
#include "cudaTools.h"
#include "Device.h"
#include "LimitsTools.h"
-using std::cout;
-using std::endl;
-
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
|* Imported *|
\*-------------------------------------*/
-extern int mainCore();
+extern int mainCore(const vector<string>& args);
extern int mainTest();
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
-int main(void);
+int main(int argc, char** argv);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static void initCuda(int deviceId);
-static int start(void);
+static int start(const vector<string>& args);
/*----------------------------------------------------------------------*\
|* Implementation *|
|* Public *|
\*-------------------------------------*/
-int main(void)
+int main(int argc, char** argv)
{
cout << "main" << endl;
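+ // Collect the command-line arguments (skipping argv[0]) and forward them to start().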
+ vector<string> args;
+ for (int i = 1; i < argc; ++i)
+ args.push_back(argv[i]);
+
// LimitsTools::rappelTypeSize();
if (Device::isCuda())
// Server Cuda2: in [0,2]
int deviceId = 0;
- int isOk = start();
+ int isOk = start(args);
//cudaDeviceReset causes the driver to clean up all state.
// While not mandatory in normal operation, it is good practice.
// Device::loadCudaDriverAll();// Force driver to be load for all GPU
}
-int start(void)
+int start(const vector<string>& args)
{
Device::printCurrent();
- bool IS_TEST = false;
-
- if (IS_TEST)
- {
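+ // No argument, or "tests": run the cpptest suites; otherwise mainCore runs the requested computations.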
+ if (args.size() == 0 || args[0] == "tests")
+ {
return mainTest();
}
else
{
- return mainCore();
+ return mainCore(args);
}
}
--- /dev/null
+#include "TestSaucisson.h"
+
+#include "Saucisson.h"
+
+TestSaucisson::TestSaucisson(int deviceId) :
+ deviceId(deviceId)
+ {
+ TEST_ADD(TestSaucisson::testSaucissonCuda);
+ }
+
+void TestSaucisson::testSaucissonCuda(void)
+ {
+ TEST_ASSERT(saucisson());
+ }
--- /dev/null
+#ifndef TEST_SAUCISSON_H
+#define TEST_SAUCISSON_H
+
+#include "cpptest.h"
+using Test::Suite;
+
+class TestSaucisson : public Suite
+ {
+ public:
+ TestSaucisson(int deviceId);
+
+ private:
+ void testSaucissonCuda();
+
+ private:
+ int deviceId;
+ };
+
+#endif
--- /dev/null
+#include "TestProduitScalaire.h"
+
+#include "ProduitScalaire.h"
+
+TestProduitScalaire::TestProduitScalaire(int deviceId) :
+ deviceId(deviceId)
+ {
+ TEST_ADD(TestProduitScalaire::testProduitScalaire);
+ }
+
+void TestProduitScalaire::testProduitScalaire(void)
+ {
+ TEST_ASSERT(produitScalaire());
+ }
--- /dev/null
+#ifndef TEST_PRODUIT_SCALAIRE_H
+#define TEST_PRODUIT_SCALAIRE_H
+
+#include "cpptest.h"
+using Test::Suite;
+
+class TestProduitScalaire : public Suite
+ {
+ public:
+ TestProduitScalaire(int deviceId);
+
+ private:
+ void testProduitScalaire();
+
+ private:
+ int deviceId;
+ };
+
+#endif
--- /dev/null
+#include "TestHistogramme.h"
+
+#include "Histogramme.h"
+
+TestHistogramme::TestHistogramme(int deviceId) :
+ deviceId(deviceId)
+ {
+ TEST_ADD(TestHistogramme::testHistogramme);
+ }
+
+void TestHistogramme::testHistogramme(void)
+ {
+ TEST_ASSERT(histogramme());
+ }
--- /dev/null
+#ifndef TEST_HISTOGRAMME_H
+#define TEST_HISTOGRAMME_H
+
+#include "cpptest.h"
+using Test::Suite;
+
+class TestHistogramme : public Suite
+ {
+ public:
+ TestHistogramme(int deviceId);
+
+ private:
+ void testHistogramme();
+
+ private:
+ int deviceId;
+ };
+
+#endif
--- /dev/null
+#include "TestMonteCarlo.h"
+
+#include "MonteCarlo.h"
+
+TestMonteCarlo::TestMonteCarlo(int deviceId) :
+ deviceId(deviceId)
+ {
+ TEST_ADD(TestMonteCarlo::testMonteCarlo);
+ }
+
+void TestMonteCarlo::testMonteCarlo(void)
+ {
+ TEST_ASSERT(monteCarlo());
+ }
--- /dev/null
+#ifndef TEST_MONTE_CARLO_H
+#define TEST_MONTE_CARLO_H
+
+#include "cpptest.h"
+using Test::Suite;
+
+class TestMonteCarlo : public Suite
+ {
+ public:
+ TestMonteCarlo(int deviceId);
+
+ private:
+ void testMonteCarlo();
+
+ private:
+ int deviceId;
+ };
+
+#endif
#include "cudaTools.h"
#include "TestHello.h"
-
+#include "TestSaucisson.h"
+#include "TestProduitScalaire.h"
+#include "TestHistogramme.h"
+#include "TestMonteCarlo.h"
using std::string;
using std::cout;
Suite testSuite;
testSuite.add(std::auto_ptr<Suite>(new TestHello(deviceId)));
+ testSuite.add(std::auto_ptr<Suite>(new TestSaucisson(deviceId)));
+ testSuite.add(std::auto_ptr<Suite>(new TestProduitScalaire(deviceId)));
+ testSuite.add(std::auto_ptr<Suite>(new TestHistogramme(deviceId)));
+ testSuite.add(std::auto_ptr<Suite>(new TestMonteCarlo(deviceId)));
string titre = "deviceId_" + StringTools::toString(deviceId);
/*-----------------*\
|* static *|
\*----------------*/
-
Rippling* RipplingProvider::createMOO()
{
const float dt = 1;
GLUTImageViewers viewer;
public:
- AutoViewer(bool isAnimation, bool isSelection, int pxFrame, int pyFrame):
+ AutoViewer(bool isAnimation, bool isSelection, int pxFrame, int pyFrame, bool run = true):
ptrOutput(TProvider::createGL()),
viewer(ptrOutput, isAnimation, isSelection, pxFrame, pyFrame)
{
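+ // When 'run' is false, the window is only registered; the caller invokes GLUTImageViewers::runALL() itself (used by the "demo" command).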
+ if (run)
+ GLUTImageViewers::runALL();
}
~AutoViewer()
GLUTImageViewers viewer;
public:
- Viewer(TOutput* output, bool isAnimation, bool isSelection, int pxFrame, int pyFrame):
+ Viewer(TOutput* output, bool isAnimation, bool isSelection, int pxFrame, int pyFrame, bool run = true):
ptrOutput(output),
viewer(ptrOutput, isAnimation, isSelection, pxFrame, pyFrame)
{
+ if (run)
+ GLUTImageViewers::runALL();
}
~Viewer()

#include "Viewer.h"

-int mainGL(void)
+int mainGL(const vector<string>& args)
{
- // AutoViewer<Rippling0Image, Rippling0Provider> rippling0(true, true, 10, 10);
- // AutoViewer<Image, RipplingProvider> rippling0(true, true, 10, 10);
- // Viewer<ImageFonctionel> fractalMandelbrot(MandelbrotProvider::createGL(true), true, true, 20, 20);
- // AutoViewer<ImageFonctionel, JuliaProvider> fractalJulia(true, true, 30, 30);
- // AutoViewer<ImageFonctionel, NewtonProvider> newtown(true, true, 20, 20);
- // AutoViewer<Image, HeatTransfertProvider> heatTransfert(true, false, 20, 20);
- // AutoViewer<ImageFonctionel, RayTracingProvider> rayTracing(true, true, 20, 20);
- Viewer<Image> convolution(ConvolutionProvider::createGL("/media/Data/Video/nasaFHD_short.avi"), true, true, 20, 20);
-
- GLUTImageViewers::runALL(); // Bloquant, Tant qu'une fenêtre est ouverte.
+ const string defaultCommand = "demo";
+ const string command = args.size() > 0 ? args[0] : defaultCommand;
+
+ // AutoViewer<Rippling0Image, Rippling0Provider> rippling0(true, true, 10, 10); // Warmup.
+
+ if (command == "rippling")
+ {
+ AutoViewer<Image, RipplingProvider> rippling(true, true, 10, 10);
+ }
+ else if (command == "mandelbrot")
+ {
+ const bool multiGPU = args.size() >= 2 && args[1] == "--mp";
+ Viewer<ImageFonctionel> fractalMandelbrot(MandelbrotProvider::createGL(multiGPU), true, true, 10, 10);
+ }
+ else if (command == "julia")
+ AutoViewer<ImageFonctionel, JuliaProvider> fractalJulia(true, true, 10, 10);
+ else if (command == "newton")
+ AutoViewer<ImageFonctionel, NewtonProvider> newtown(true, true, 10, 10);
+ else if (command == "heat-transfert")
+ AutoViewer<Image, HeatTransfertProvider> heatTransfert(true, false, 10, 10);
+ else if (command == "convolution")
+ {
+ const string videoPath = args.size() >= 2 ? args[1] : "/media/Data/Video/nasaFHD_short.avi"; // Default video if not given as an argument.
+ Viewer<Image> convolution(ConvolutionProvider::createGL(videoPath), true, true, 10, 10);
+ }
+ else if (command == "demo")
+ {
+ Viewer<Image> convolution(ConvolutionProvider::createGL("/media/Data/Video/nasaFHD_short.avi"), true, true, 10, 10, false);
+ AutoViewer<Image, RipplingProvider> rippling(true, true, 60, 30, false);
+ Viewer<ImageFonctionel> fractalMandelbrot(MandelbrotProvider::createGL(false), true, true, 120, 60, false);
+ AutoViewer<ImageFonctionel, JuliaProvider> fractalJulia(true, true, 180, 80, false);
+ AutoViewer<ImageFonctionel, NewtonProvider> newtown(true, true, 260, 120, false);
+ AutoViewer<Image, HeatTransfertProvider> heatTransfert(true, false, 1200, 300, false);
+ GLUTImageViewers::runALL();
+ }
+ else
+ {
+ cout << "Unknown command: " << command << endl;
+ }
+
+ // AutoViewer<ImageFonctionel, RayTracingProvider> rayTracing(true, true, 20, 20); // Commented out (in-depth project).

return EXIT_SUCCESS;
}
#include <iostream>
+#include <vector>
+#include <string>
#include <stdlib.h>
#include <assert.h>
+using namespace std;
#include "cudaTools.h"
#include "Device.h"
#include "cudaTools.h"
#include "GLUTImageViewers.h"
-using std::cout;
-using std::endl;
-
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
|* Imported *|
\*-------------------------------------*/
-extern int mainGL(void);
+extern int mainGL(const vector<string>& args);
extern int mainFreeGL(void);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
-static int start(void);
+static int start(const vector<string>& args);
static void initCuda(int deviceId);
/*----------------------------------------------------------------------*\
// Server Cuda2: in [0,2]
int deviceId = 0;
- int isOk = start();
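+ // Collect the command-line arguments (skipping argv[0]) and pass them to start(), which forwards them to mainGL.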
+ vector<string> args;
+ for (int i = 1; i < argc; ++i)
+ args.push_back(argv[i]);
+
+ int isOk = start(args);
//cudaDeviceReset causes the driver to clean up all state.
// While not mandatory in normal operation, it is good practice.
// Choose current device (state of host-thread)
HANDLE_ERROR(cudaSetDevice(deviceId));
- // Enable Interoperabilité OpenGL:
+ // Enable OpenGL interoperability:
// - Create a cuda specifique contexte, shared between Cuda and GL
// - To be called before first call to kernel
// - cudaSetDevice ou cudaGLSetGLDevice are mutualy exclusive
// Device::loadCudaDriverAll();// Force driver to be load for all GPU
}
-int start(void)
+int start(const vector<string>& args)
{
Device::printCurrent();
if (IS_GL)
{
- return mainGL(); // Bloquant, Tant qu'une fenetre est ouverte
+ return mainGL(args); // Blocking as long as a window is open.
}
else
{