From: gburri
Date: Tue, 13 Jan 2015 00:10:16 +0000 (+0100)
Subject: Add test cases for the non-graphical TPs (practical exercises). Miscellaneous cleanup.
X-Git-Url: https://git.euphorik.ch/?a=commitdiff_plain;h=6664817ed89b0b616044da35a3eb8f715e0813d9;p=GPU.git

Add test cases for the non-graphical TPs (practical exercises). Miscellaneous cleanup.
---
diff --git a/WCudaMSE/Student_Cuda/deviceId_0.html b/WCudaMSE/Student_Cuda/deviceId_0.html
index 306cb24..cad5fd8 100755
--- a/WCudaMSE/Student_Cuda/deviceId_0.html
+++ b/WCudaMSE/Student_Cuda/deviceId_0.html
(Regenerated CppTest HTML report: the "Designed by CppTest" summary now counts 6 tests in 3.000000 s instead of 2 tests in 0.000000 s, and four new suites are listed, each containing a single passing test.)

    Suite                  Test                  Errors   Success   Time (s)
    TestSaucisson          testSaucissonCuda     0        true      0.000000
    TestProduitScalaire    testProduitScalaire   0        true      1.000000
    TestHistogramme        testHistogramme       0        true      2.000000
    TestMonteCarlo         testMonteCarlo        0        true      0.000000
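For context: with the argument handling introduced further down in this commit (main.cpp/mainCore.cpp for Student_Cuda, main.cpp/mainGL.cpp for Student_Cuda_Image), the report above and the individual exercises can be driven from the command line. The invocations below are an editor's illustration only; the executable names are assumptions, since the diff does not show how the binaries are called:

    ./Student_Cuda                      # no argument, or "tests": run the CppTest suites that generate deviceId_0.html
    ./Student_Cuda saucisson produit-scalaire histogramme monte-carlo    # run selected non-graphical TPs
    ./Student_Cuda_Image                # no argument: the "demo" command, all viewers at once
    ./Student_Cuda_Image mandelbrot --mp
    ./Student_Cuda_Image convolution /path/to/video.avi                  # hypothetical video path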
diff --git a/WCudaMSE/Student_Cuda/src/cpp/core/01c_Saucisson/Saucisson.cu b/WCudaMSE/Student_Cuda/src/cpp/core/01c_Saucisson/Saucisson.cu index 3ec1cd3..4d887a9 100644 --- a/WCudaMSE/Student_Cuda/src/cpp/core/01c_Saucisson/Saucisson.cu +++ b/WCudaMSE/Student_Cuda/src/cpp/core/01c_Saucisson/Saucisson.cu @@ -27,7 +27,7 @@ void reductionIntraThread(int n, float deltaX, float* tabSM) { const float i = s + 1; const float xi = -1 + i * deltaX; - threadResult += sqrtf(1-xi*xi); + threadResult += sqrtf(1 - xi * xi); s += NB_THREAD; } @@ -78,7 +78,7 @@ void reductionInterBlock(float* tabSM, float* ptrResult) const int TID_LOCAL = Indice1D::tidLocal(); if (TID_LOCAL == 0) { - atomicAdd(ptrResult, float(tabSM[0])); + atomicAdd(ptrResult, tabSM[0]); } } @@ -107,13 +107,13 @@ void saucisson(int n, float deltaX, float* ptrResult) bool saucisson() { - cout << "saucisson() ..." << endl; + cout << endl << "saucisson() ..." << endl; // Nombre d'échantillon. Au-delà, la qualité du résultat n'est pas meilleure. Il faudrait employé des doubles à la place de floats. const int N = 100000; // Allocation coté GPU en global memory (GM). - float* ptrDevResult = 0; + float* ptrDevResult; HANDLE_ERROR(cudaMalloc(&ptrDevResult, sizeof(float))); HANDLE_ERROR(cudaMemset(ptrDevResult, 0, sizeof(float))); diff --git a/WCudaMSE/Student_Cuda/src/cpp/core/02_ProduitScalaire/ProduitScalaire.cu b/WCudaMSE/Student_Cuda/src/cpp/core/02_ProduitScalaire/ProduitScalaire.cu index 429659b..0299506 100644 --- a/WCudaMSE/Student_Cuda/src/cpp/core/02_ProduitScalaire/ProduitScalaire.cu +++ b/WCudaMSE/Student_Cuda/src/cpp/core/02_ProduitScalaire/ProduitScalaire.cu @@ -163,7 +163,7 @@ double resultatTheorique(long n) bool produitScalaire() { - cout << "produitScalaire() ..." << endl; + cout << endl << "produitScalaire() ..." << endl; const int N = 10000000; // Taille des deux vecteurs : 10 * 10^6. @@ -192,7 +192,7 @@ bool produitScalaire() cout << "Résultat : " << res << endl; cout << "Résultat théorique : " << resTheo << endl; cout << "Différence absolue : " << resTheo - res << endl; - cout << "Différence relatif : " << 100 * (resTheo - res) / (resTheo + res) << " %" << endl; + cout << "Différence relative : " << 100 * (resTheo - res) / (resTheo + res) << " %" << endl; return true; } diff --git a/WCudaMSE/Student_Cuda/src/cpp/core/03_Histogramme/Histogramme.cu b/WCudaMSE/Student_Cuda/src/cpp/core/03_Histogramme/Histogramme.cu index 07bc577..9c1ef53 100644 --- a/WCudaMSE/Student_Cuda/src/cpp/core/03_Histogramme/Histogramme.cu +++ b/WCudaMSE/Student_Cuda/src/cpp/core/03_Histogramme/Histogramme.cu @@ -122,7 +122,7 @@ uchar* createTabInput(int size) bool histogramme() { - cout << "hisogramme() ..." << endl; + cout << endl << "histogramme() ..." << endl; Chronos chronos; chronos.start(); diff --git a/WCudaMSE/Student_Cuda/src/cpp/core/04_MonteCarlo/MonteCarlo.cu b/WCudaMSE/Student_Cuda/src/cpp/core/04_MonteCarlo/MonteCarlo.cu new file mode 100644 index 0000000..c51910e --- /dev/null +++ b/WCudaMSE/Student_Cuda/src/cpp/core/04_MonteCarlo/MonteCarlo.cu @@ -0,0 +1,176 @@ +#include "MonteCarlo.h" + +#include +#include +#include +using namespace std; + +#include + +#include "Indice1D.h" +#include "cudaTools.h" +#include "Device.h" +#include "Lock.h" + +// Paramètres pour le calcul de pi. +const float X_MIN = 0; +const float X_MAX = 1; +const float M = 1; + +/* + * 1) Chaque thread calcule un résultat intermediaire qu'il va ensuite placer en shared memory. + * n: Nombre d'échantillon. 
+ */ +__device__ +void mc_reductionIntraThread(int n, uint* tabSM, curandState* tabGeneratorThread) + { + const int NB_THREAD = Indice1D::nbThread(); + const int TID = Indice1D::tid(); + const int TID_LOCAL = Indice1D::tidLocal(); + + curandState& localState = tabGeneratorThread[TID]; + + uint threadResult = 0; + int s = TID; + while (s < n) + { + // Tire un point aléatoire entre (0,0) et (1,1), voir X_MIN, X_MAX et M. + const float x = curand_uniform(&localState); + const float y = curand_uniform(&localState); + const float fx = sqrtf(1 - x * x); + + // Si y est sous la courbe alors on le comptabilise. + if (y < fx) + threadResult += 1; + + s += NB_THREAD; + } + + tabSM[TID_LOCAL] = threadResult; + } + +/* + * Combine les résultats de 'tabSM' dans 'tabSM[0]' + */ +__device__ +void mc_combine(uint* tabSM, int middle) + { + const int TID_LOCAL = Indice1D::tidLocal(); + const int NB_THREAD_LOCAL = Indice1D::nbThreadBlock(); + + int s = TID_LOCAL; + while (s < middle) + { + tabSM[s] += tabSM[s + middle]; + s += NB_THREAD_LOCAL; + } + } + +/* + * 2) La shared memory est réduite, le résultat est placé dans 'tabSM[0]'. + */ +__device__ +void mc_reductionIntraBlock(uint* tabSM) + { + const int TAB_SIZE = blockDim.x; + int middle = TAB_SIZE / 2; + + while (middle > 0) + { + mc_combine(tabSM, middle); + middle /= 2; + __syncthreads(); // Synchronisation des threads au niveau du bloc. + } + } + +/* + * 3) Le 'tabSM[0]' de chaque bloc est reduit dans 'ptrResult' qui se trouve en global memory. + */ +__device__ +void mc_reductionInterBlock(uint* tabSM, uint* ptrResult) + { + const int TID_LOCAL = Indice1D::tidLocal(); + if (TID_LOCAL == 0) + { + atomicAdd(ptrResult, tabSM[0]); + } + } + +/** + * La taille de la shared memory (en terme de # de sizeof(uint)) doit + * être égal à la taille des blocs. + * n: le nombre de point à tiré aléatoirement. + * ptrResult: Le resultat du calcul. + * tabGeneratorThread: les generateurs aléatoires + */ +__global__ +void monteCarlo(int n, uint* ptrResult, curandState* tabGeneratorThread) + { + extern __shared__ uint tabSM[]; // Dynamic shared memory. + + // 1) Réduction intra-thread. + mc_reductionIntraThread(n, tabSM, tabGeneratorThread); + + __syncthreads(); + + // 2) Réduction intra-block. + mc_reductionIntraBlock(tabSM); + + // 3) Réduction inter-block. + mc_reductionInterBlock(tabSM, ptrResult); + } + +__global__ +void setupKernelRand(curandState* tabGeneratorThread, int deviceId) + { + int tid = Indice1D::tid(); + int deltaSeed = deviceId * INT_MAX; + int deltaSequence = deviceId * 100; + int deltaOffset = deviceId * 100; + + int seed = 1234 + deltaSeed; + int sequenceNumber = tid + deltaSequence; + int offset = deltaOffset; + curand_init(seed, sequenceNumber, offset, &tabGeneratorThread[tid]); + } + +bool monteCarlo() + { + cout << endl << "monteCarlo() ..." << endl; + + // Nombre de point total tiré aléatoirement. + const int N = 1000000; + + // Allocation coté GPU en global memory (GM). + uint* ptrDevResult; + HANDLE_ERROR(cudaMalloc(&ptrDevResult, sizeof(uint))); + HANDLE_ERROR(cudaMemset(ptrDevResult, 0, sizeof(uint))); + + // Paramètre de l'appel de la fonction sur le device. + const dim3 dg(256, 1, 1); + const dim3 db(256, 1, 1); + Device::assertDim(dg, db); + const size_t SMSize = db.x * sizeof(uint); // 256 uint; + + // Gestion des générateurs aléatoires (un par thread). 
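// (Editor's note, not part of the commit.) One curandState is allocated below for
// each of the db.x * dg.x launched threads. setupKernelRand() seeds every state
// with curand_init(seed, sequenceNumber, offset, &tabGeneratorThread[tid]), so each
// thread owns an independent random sequence; inside the kernel,
// mc_reductionIntraThread() then reads its own state via tabGeneratorThread[TID]
// and draws (x, y) with curand_uniform(), which returns a float in (0, 1].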
+ curandState* tabGeneratorThread; + HANDLE_ERROR(cudaMalloc(&tabGeneratorThread, db.x * dg.x * sizeof(curandState))); + setupKernelRand<<>>(tabGeneratorThread, 0); // Device 0 (à utilise si utilisation en MP). + cudaDeviceSynchronize(); + + monteCarlo<<>>(N, ptrDevResult, tabGeneratorThread); + + // cudaDeviceSynchronize(); // Utilisé pour flusher les prints sur le stdout à partir du device (debug). + + uint nb; + // Barrière implicite de synchronisation ('cudaMemCpy'). + HANDLE_ERROR(cudaMemcpy(&nb, ptrDevResult, sizeof(uint), cudaMemcpyDeviceToHost)); + + double integrale = (double)nb / (double)N * (X_MAX - X_MIN) * M; + double pi = 4 * integrale; + + cout.precision(20); + cout << "Approximation de PI : " << pi << endl; + + return true; + } diff --git a/WCudaMSE/Student_Cuda/src/cpp/core/04_MonteCarlo/MonteCarlo.h b/WCudaMSE/Student_Cuda/src/cpp/core/04_MonteCarlo/MonteCarlo.h new file mode 100644 index 0000000..3cd33ec --- /dev/null +++ b/WCudaMSE/Student_Cuda/src/cpp/core/04_MonteCarlo/MonteCarlo.h @@ -0,0 +1,6 @@ +#ifndef MONTE_CARLO_H +#define MONTE_CARLO_H + +bool monteCarlo(); + +#endif diff --git a/WCudaMSE/Student_Cuda/src/cpp/core/mainCore.cpp b/WCudaMSE/Student_Cuda/src/cpp/core/mainCore.cpp index 2b24911..12ba6fa 100755 --- a/WCudaMSE/Student_Cuda/src/cpp/core/mainCore.cpp +++ b/WCudaMSE/Student_Cuda/src/cpp/core/mainCore.cpp @@ -1,22 +1,35 @@ #include #include +#include using namespace std; #include "01c_Saucisson/Saucisson.h" #include "02_ProduitScalaire/ProduitScalaire.h" #include "03_Histogramme/Histogramme.h" +#include "04_MonteCarlo/MonteCarlo.h" extern bool useHello(); extern bool addVectors(); -int mainCore() +int mainCore(const vector& args) { bool isOk = true; + + // Exemples simples. //isOk &= useHello(); //isOk &= addVectors(); - //isOk &= produitScalaire(); - //isOk &= saucisson(); - //isOk &= histogramme(); + + if (find(args.begin(), args.end(), "saucisson") != args.end()) + isOk &= saucisson(); + + if (find(args.begin(), args.end(), "produit-scalaire") != args.end()) + isOk &= produitScalaire(); + + if (find(args.begin(), args.end(), "histogramme") != args.end()) + isOk &= histogramme(); + + if (find(args.begin(), args.end(), "monte-carlo") != args.end()) + isOk &= monteCarlo(); cout << "\nisOK = " << isOk << endl; cout << "\nEnd : mainCore" << endl; diff --git a/WCudaMSE/Student_Cuda/src/cpp/main.cpp b/WCudaMSE/Student_Cuda/src/cpp/main.cpp index 495aa65..7c3dc42 100755 --- a/WCudaMSE/Student_Cuda/src/cpp/main.cpp +++ b/WCudaMSE/Student_Cuda/src/cpp/main.cpp @@ -1,14 +1,13 @@ #include +#include #include #include +using namespace std; #include "cudaTools.h" #include "Device.h" #include "LimitsTools.h" -using std::cout; -using std::endl; - /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ @@ -17,21 +16,21 @@ using std::endl; |* Imported *| \*-------------------------------------*/ -extern int mainCore(); +extern int mainCore(const vector& args); extern int mainTest(); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ -int main(void); +int main(int argc, char** argv); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ static void initCuda(int deviceId); -static int start(void); +static int start(const vector& args); /*----------------------------------------------------------------------*\ |* Implementation *| @@ -41,10 +40,14 @@ static int start(void); 
|* Public *| \*-------------------------------------*/ -int main(void) +int main(int argc, char** argv) { cout << "main" << endl; + vector args; + for (int i = 1; i < argc; ++i) + args.push_back(argv[i]); + // LimitsTools::rappelTypeSize(); if (Device::isCuda()) @@ -56,7 +59,7 @@ int main(void) // Server Cuda2: in [0,2] int deviceId = 0; - int isOk = start(); + int isOk = start(args); //cudaDeviceReset causes the driver to clean up all state. // While not mandatory in normal operation, it is good practice. @@ -86,19 +89,17 @@ void initCuda(int deviceId) // Device::loadCudaDriverAll();// Force driver to be load for all GPU } -int start(void) +int start(const vector& args) { Device::printCurrent(); - bool IS_TEST = false; - - if (IS_TEST) - { + if (args.size() == 0 || args[0] == "tests") + { return mainTest(); } else { - return mainCore(); + return mainCore(args); } } diff --git a/WCudaMSE/Student_Cuda/src/cpp/test/junit/01c_Saucisson/TestSaucisson.cpp b/WCudaMSE/Student_Cuda/src/cpp/test/junit/01c_Saucisson/TestSaucisson.cpp new file mode 100755 index 0000000..6ce0fcf --- /dev/null +++ b/WCudaMSE/Student_Cuda/src/cpp/test/junit/01c_Saucisson/TestSaucisson.cpp @@ -0,0 +1,14 @@ +#include "TestSaucisson.h" + +#include "Saucisson.h" + +TestSaucisson::TestSaucisson(int deviceId) : + deviceId(deviceId) + { + TEST_ADD(TestSaucisson::testSaucissonCuda); + } + +void TestSaucisson::testSaucissonCuda(void) + { + TEST_ASSERT(saucisson()); + } diff --git a/WCudaMSE/Student_Cuda/src/cpp/test/junit/01c_Saucisson/TestSaucisson.h b/WCudaMSE/Student_Cuda/src/cpp/test/junit/01c_Saucisson/TestSaucisson.h new file mode 100755 index 0000000..8af4e73 --- /dev/null +++ b/WCudaMSE/Student_Cuda/src/cpp/test/junit/01c_Saucisson/TestSaucisson.h @@ -0,0 +1,19 @@ +#ifndef TEST_SAUCISSON_H +#define TEST_SAUCISSON_H + +#include "cpptest.h" +using Test::Suite; + +class TestSaucisson : public Suite + { + public: + TestSaucisson(int deviceId); + + private: + void testSaucissonCuda(); + + private: + int deviceId; + }; + +#endif diff --git a/WCudaMSE/Student_Cuda/src/cpp/test/junit/02_ProduitScalaire/TestProduitScalaire.cpp b/WCudaMSE/Student_Cuda/src/cpp/test/junit/02_ProduitScalaire/TestProduitScalaire.cpp new file mode 100755 index 0000000..f43d7a8 --- /dev/null +++ b/WCudaMSE/Student_Cuda/src/cpp/test/junit/02_ProduitScalaire/TestProduitScalaire.cpp @@ -0,0 +1,14 @@ +#include "TestProduitScalaire.h" + +#include "ProduitScalaire.h" + +TestProduitScalaire::TestProduitScalaire(int deviceId) : + deviceId(deviceId) + { + TEST_ADD(TestProduitScalaire::testProduitScalaire); + } + +void TestProduitScalaire::testProduitScalaire(void) + { + TEST_ASSERT(produitScalaire()); + } diff --git a/WCudaMSE/Student_Cuda/src/cpp/test/junit/02_ProduitScalaire/TestProduitScalaire.h b/WCudaMSE/Student_Cuda/src/cpp/test/junit/02_ProduitScalaire/TestProduitScalaire.h new file mode 100755 index 0000000..6b437c4 --- /dev/null +++ b/WCudaMSE/Student_Cuda/src/cpp/test/junit/02_ProduitScalaire/TestProduitScalaire.h @@ -0,0 +1,19 @@ +#ifndef TEST_PRODUIT_SCALAIRE_H +#define TEST_PRODUIT_SCALAIRE_H + +#include "cpptest.h" +using Test::Suite; + +class TestProduitScalaire : public Suite + { + public: + TestProduitScalaire(int deviceId); + + private: + void testProduitScalaire(); + + private: + int deviceId; + }; + +#endif diff --git a/WCudaMSE/Student_Cuda/src/cpp/test/junit/03_Histogramme/TestHistogramme.cpp b/WCudaMSE/Student_Cuda/src/cpp/test/junit/03_Histogramme/TestHistogramme.cpp new file mode 100755 index 0000000..e350fe8 --- /dev/null +++ 
b/WCudaMSE/Student_Cuda/src/cpp/test/junit/03_Histogramme/TestHistogramme.cpp @@ -0,0 +1,14 @@ +#include "TestHistogramme.h" + +#include "Histogramme.h" + +TestHistogramme::TestHistogramme(int deviceId) : + deviceId(deviceId) + { + TEST_ADD(TestHistogramme::testHistogramme); + } + +void TestHistogramme::testHistogramme(void) + { + TEST_ASSERT(histogramme()); + } diff --git a/WCudaMSE/Student_Cuda/src/cpp/test/junit/03_Histogramme/TestHistogramme.h b/WCudaMSE/Student_Cuda/src/cpp/test/junit/03_Histogramme/TestHistogramme.h new file mode 100755 index 0000000..0ae01ec --- /dev/null +++ b/WCudaMSE/Student_Cuda/src/cpp/test/junit/03_Histogramme/TestHistogramme.h @@ -0,0 +1,19 @@ +#ifndef TEST_HISTOGRAMME_H +#define TEST_HISTOGRAMME_H + +#include "cpptest.h" +using Test::Suite; + +class TestHistogramme : public Suite + { + public: + TestHistogramme(int deviceId); + + private: + void testHistogramme(); + + private: + int deviceId; + }; + +#endif diff --git a/WCudaMSE/Student_Cuda/src/cpp/test/junit/04_MonteCarlo/TestMonteCarlo.cpp b/WCudaMSE/Student_Cuda/src/cpp/test/junit/04_MonteCarlo/TestMonteCarlo.cpp new file mode 100755 index 0000000..bf28f8d --- /dev/null +++ b/WCudaMSE/Student_Cuda/src/cpp/test/junit/04_MonteCarlo/TestMonteCarlo.cpp @@ -0,0 +1,14 @@ +#include "TestMonteCarlo.h" + +#include "MonteCarlo.h" + +TestMonteCarlo::TestMonteCarlo(int deviceId) : + deviceId(deviceId) + { + TEST_ADD(TestMonteCarlo::testMonteCarlo); + } + +void TestMonteCarlo::testMonteCarlo(void) + { + TEST_ASSERT(monteCarlo()); + } diff --git a/WCudaMSE/Student_Cuda/src/cpp/test/junit/04_MonteCarlo/TestMonteCarlo.h b/WCudaMSE/Student_Cuda/src/cpp/test/junit/04_MonteCarlo/TestMonteCarlo.h new file mode 100755 index 0000000..3078f61 --- /dev/null +++ b/WCudaMSE/Student_Cuda/src/cpp/test/junit/04_MonteCarlo/TestMonteCarlo.h @@ -0,0 +1,19 @@ +#ifndef TEST_MONTE_CARLO_H +#define TEST_MONTE_CARLO_H + +#include "cpptest.h" +using Test::Suite; + +class TestMonteCarlo : public Suite + { + public: + TestMonteCarlo(int deviceId); + + private: + void testMonteCarlo(); + + private: + int deviceId; + }; + +#endif diff --git a/WCudaMSE/Student_Cuda/src/cpp/test/mainTest.cpp b/WCudaMSE/Student_Cuda/src/cpp/test/mainTest.cpp index d6a5144..a97db2e 100755 --- a/WCudaMSE/Student_Cuda/src/cpp/test/mainTest.cpp +++ b/WCudaMSE/Student_Cuda/src/cpp/test/mainTest.cpp @@ -8,7 +8,10 @@ #include "cudaTools.h" #include "TestHello.h" - +#include "TestSaucisson.h" +#include "TestProduitScalaire.h" +#include "TestHistogramme.h" +#include "TestMonteCarlo.h" using std::string; using std::cout; @@ -64,6 +67,10 @@ bool testALL() Suite testSuite; testSuite.add(std::auto_ptr(new TestHello(deviceId))); + testSuite.add(std::auto_ptr(new TestSaucisson(deviceId))); + testSuite.add(std::auto_ptr(new TestProduitScalaire(deviceId))); + testSuite.add(std::auto_ptr(new TestHistogramme(deviceId))); + testSuite.add(std::auto_ptr(new TestMonteCarlo(deviceId))); string titre = "deviceId_" + StringTools::toString(deviceId); diff --git a/WCudaMSE/Student_Cuda_Image/src/cpp/core/01_Rippling/provider/RipplingProvider.cpp b/WCudaMSE/Student_Cuda_Image/src/cpp/core/01_Rippling/provider/RipplingProvider.cpp index cade757..a5e07a0 100755 --- a/WCudaMSE/Student_Cuda_Image/src/cpp/core/01_Rippling/provider/RipplingProvider.cpp +++ b/WCudaMSE/Student_Cuda_Image/src/cpp/core/01_Rippling/provider/RipplingProvider.cpp @@ -28,7 +28,6 @@ /*-----------------*\ |* static *| \*----------------*/ - Rippling* RipplingProvider::createMOO() { const float dt = 1; diff --git 
a/WCudaMSE/Student_Cuda_Image/src/cpp/core/Viewer.h b/WCudaMSE/Student_Cuda_Image/src/cpp/core/Viewer.h index d77be2a..eaa4f75 100644 --- a/WCudaMSE/Student_Cuda_Image/src/cpp/core/Viewer.h +++ b/WCudaMSE/Student_Cuda_Image/src/cpp/core/Viewer.h @@ -12,10 +12,12 @@ class AutoViewer GLUTImageViewers viewer; public: - AutoViewer(bool isAnimation, bool isSelection, int pxFrame, int pyFrame): + AutoViewer(bool isAnimation, bool isSelection, int pxFrame, int pyFrame, bool run = true): ptrOutput(TProvider::createGL()), viewer(ptrOutput, isAnimation, isSelection, pxFrame, pyFrame) { + if (run) + GLUTImageViewers::runALL(); } ~AutoViewer() @@ -33,10 +35,12 @@ class Viewer GLUTImageViewers viewer; public: - Viewer(TOutput* output, bool isAnimation, bool isSelection, int pxFrame, int pyFrame): + Viewer(TOutput* output, bool isAnimation, bool isSelection, int pxFrame, int pyFrame, bool run = true): ptrOutput(output), viewer(ptrOutput, isAnimation, isSelection, pxFrame, pyFrame) { + if (run) + GLUTImageViewers::runALL(); } ~Viewer() diff --git a/WCudaMSE/Student_Cuda_Image/src/cpp/core/mainGL.cpp b/WCudaMSE/Student_Cuda_Image/src/cpp/core/mainGL.cpp index 10a9393..d7a9fec 100755 --- a/WCudaMSE/Student_Cuda_Image/src/cpp/core/mainGL.cpp +++ b/WCudaMSE/Student_Cuda_Image/src/cpp/core/mainGL.cpp @@ -18,18 +18,49 @@ using namespace std; #include "Viewer.h" -int mainGL(void) +int mainGL(const vector& args) { - // AutoViewer rippling0(true, true, 10, 10); - // AutoViewer rippling0(true, true, 10, 10); - // Viewer fractalMandelbrot(MandelbrotProvider::createGL(true), true, true, 20, 20); - // AutoViewer fractalJulia(true, true, 30, 30); - // AutoViewer newtown(true, true, 20, 20); - // AutoViewer heatTransfert(true, false, 20, 20); - // AutoViewer rayTracing(true, true, 20, 20); - Viewer convolution(ConvolutionProvider::createGL("/media/Data/Video/nasaFHD_short.avi"), true, true, 20, 20); - - GLUTImageViewers::runALL(); // Bloquant, Tant qu'une fenêtre est ouverte. + const string defaultCommand = "demo"; + const string command = args.size() > 0 ? args[0] : defaultCommand; + + // AutoViewer rippling0(true, true, 10, 10); // Warmup. + + if (command == "rippling") + { + AutoViewer rippling(true, true, 10, 10); + } + else if (command == "mandelbrot") + { + const bool multiGPU = args.size() >= 2 && args[1] == "--mp"; + Viewer fractalMandelbrot(MandelbrotProvider::createGL(multiGPU), true, true, 10, 10); + } + else if (command == "julia") + AutoViewer fractalJulia(true, true, 10, 10); + else if (command == "newton") + AutoViewer newtown(true, true, 10, 10); + else if (command == "heat-transfert") + AutoViewer heatTransfert(true, false, 10, 10); + else if (command == "convolution") + { + const string videoPath = args.size() >= 2 ? args[1] : "/media/Data/Video/nasaFHD_short.avi"; // Vidéo par défaut si pas donnée en paramètre. 
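// (Editor's note, not part of the commit.) Because Viewer and AutoViewer now take a
// trailing 'run' parameter that defaults to true (see Viewer.h above), each viewer
// calls GLUTImageViewers::runALL() from its own constructor, so single-viewer
// branches like this one no longer need the explicit blocking runALL() call. Only
// the "demo" branch below constructs every viewer with run = false and then calls
// runALL() once, so that all windows are created before the blocking event loop starts.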
+ Viewer convolution(ConvolutionProvider::createGL(videoPath), true, true, 10, 10); + } + else if (command == "demo") + { + Viewer convolution(ConvolutionProvider::createGL("/media/Data/Video/nasaFHD_short.avi"), true, true, 10, 10, false); + AutoViewer rippling(true, true, 60, 30, false); + Viewer fractalMandelbrot(MandelbrotProvider::createGL(false), true, true, 120, 60, false); + AutoViewer fractalJulia(true, true, 180, 80, false); + AutoViewer newtown(true, true, 260, 120, false); + AutoViewer heatTransfert(true, false, 1200, 300, false); + GLUTImageViewers::runALL(); + } + else + { + cout << "Command unknown: " << command << endl; + } + + // AutoViewer rayTracing(true, true, 20, 20); // Commenté car projet approfondit. return EXIT_SUCCESS; } diff --git a/WCudaMSE/Student_Cuda_Image/src/cpp/main.cpp b/WCudaMSE/Student_Cuda_Image/src/cpp/main.cpp index 397a133..5e56e8a 100755 --- a/WCudaMSE/Student_Cuda_Image/src/cpp/main.cpp +++ b/WCudaMSE/Student_Cuda_Image/src/cpp/main.cpp @@ -1,15 +1,14 @@ #include +#include #include #include +using namespace std; #include "cudaTools.h" #include "Device.h" #include "cudaTools.h" #include "GLUTImageViewers.h" -using std::cout; -using std::endl; - /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ @@ -18,7 +17,7 @@ using std::endl; |* Imported *| \*-------------------------------------*/ -extern int mainGL(void); +extern int mainGL(const vector& args); extern int mainFreeGL(void); /*--------------------------------------*\ @@ -31,7 +30,7 @@ int main(int argc, char** argv); |* Private *| \*-------------------------------------*/ -static int start(void); +static int start(const vector& args); static void initCuda(int deviceId); /*----------------------------------------------------------------------*\ @@ -57,7 +56,11 @@ int main(int argc, char** argv) // Server Cuda2: in [0,2] int deviceId = 0; - int isOk = start(); + vector args; + for (int i = 1; i < argc; ++i) + args.push_back(argv[i]); + + int isOk = start(args); //cudaDeviceReset causes the driver to clean up all state. // While not mandatory in normal operation, it is good practice. @@ -84,7 +87,7 @@ void initCuda(int deviceId) // Choose current device (state of host-thread) HANDLE_ERROR(cudaSetDevice(deviceId)); - // Enable Interoperabilité OpenGL: + // Enable Interoperabilit� OpenGL: // - Create a cuda specifique contexte, shared between Cuda and GL // - To be called before first call to kernel // - cudaSetDevice ou cudaGLSetGLDevice are mutualy exclusive @@ -95,7 +98,7 @@ void initCuda(int deviceId) // Device::loadCudaDriverAll();// Force driver to be load for all GPU } -int start(void) +int start(const vector& args) { Device::printCurrent(); @@ -103,7 +106,7 @@ int start(void) if (IS_GL) { - return mainGL(); // Bloquant, Tant qu'une fenetre est ouverte + return mainGL(args); // Bloquant, Tant qu'une fenetre est ouverte } else {
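As a closing illustration, here is a minimal host-side reference of the estimate computed by the monteCarlo kernel in MonteCarlo.cu, convenient for sanity-checking the GPU result. This is an editor's sketch, not code from the commit; it reuses the constants X_MIN = 0, X_MAX = 1, M = 1 and N = 1000000 from the diff and substitutes std::rand for cuRAND:

#include <cmath>
#include <cstdlib>
#include <iostream>
using namespace std;

// Editor's sketch (not part of the commit): CPU reference for the Monte Carlo
// estimate of pi in MonteCarlo.cu. Counts points (x, y) falling under
// f(x) = sqrt(1 - x^2) on [0, 1] x [0, 1]; the hit ratio approximates the
// quarter-circle area pi / 4.
int main()
    {
    const float X_MIN = 0;
    const float X_MAX = 1;
    const float M = 1;
    const int N = 1000000; // Total number of random points, as in MonteCarlo.cu.

    srand(1234);

    long nb = 0; // Points under the curve.
    for (int i = 0; i < N; ++i)
        {
        const float x = X_MIN + (X_MAX - X_MIN) * rand() / (float)RAND_MAX;
        const float y = M * rand() / (float)RAND_MAX;
        if (y < sqrt(1 - x * x))
            nb += 1;
        }

    const double integrale = (double)nb / N * (X_MAX - X_MIN) * M;
    const double pi = 4 * integrale;

    cout.precision(20);
    cout << "Approximation de PI : " << pi << endl;
    return 0;
    }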