test-BOWN_Mod.C

/*
 *  test-BOWN_Mod.C
 *
 *  Created by Randolph Voorhies
 *
 */


#include "Component/ModelManager.H"
#include "Media/FrameSeries.H"
#include "Transport/FrameIstream.H"
#include "Media/MediaOpts.H"
#include "Image/Image.H"
#include "Image/Pixels.H"
#include "Raster/Raster.H"
#include "GUI/XWinManaged.H"
#include "Image/ImageSet.H"
#include "Image/DrawOps.H"
#include "Image/ShapeOps.H"
#include "Image/Kernels.H"
#include "Image/Normalize.H"
#include "Image/SimpleFont.H"
#include "rutz/trace.h"
#include <cmath>    // for cos, sin, atan2
#include <cstdio>
#include <cstdlib>  // for exit
#include <cstring>  // for strncpy
#include <iostream>
#include <string>
#include <vector>

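//Notes on the constants below: THETA_LEVELS is the number of discrete border-ownership
//directions covering 0..2PI, MAP_SIZE is the side length (in cells) of each V2 layer,
//MOD_WEIGHT_WIDTH controls the spread of the Gaussian attention kernel passed to gaussian2D,
//OUTPUT_RES and OUT_SCALE_FACT control how the MAP_SIZE x MAP_SIZE maps are scaled up for
//display, and BRIGHTNESS scales the color of the drawn modulation vectors.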
#define PI 3.14159265F
#define THETA_LEVELS 24
#define MAP_SIZE 100
#define MOD_WEIGHT_WIDTH (MAP_SIZE/6)
#define OUTPUT_RES 500
#define OUT_SCALE_FACT (OUTPUT_RES/MAP_SIZE)
#define BRIGHTNESS 10

using namespace std;

float beta(Point2D<int> i, Point2D<int> j);
float M(Image<float> WeightKernel, float theta, Point2D<int> i, Point2D<int> att);
//Data Set 1: A single bistable vertical line
void initDataSet1(vector< Image<float> > &V2, Point2D<int> &attentionPoint, float &k);
//Data Set 2: A single bistable PI/4 line
void initDataSet2(vector< Image<float> > &V2, Point2D<int> &attentionPoint, float &k);
//Data Set 3: A single bistable square
void initDataSet3(vector< Image<float> > &V2, Point2D<int> &attentionPoint, float &k);
//Data Set 4: A set of 5 ambiguously owned squares, sharing borders
void initDataSet4(vector< Image<float> > &V2, Point2D<int> &attentionPoint, float &k);
//Data Set 5: A rough imitation of a "Rubin's Vase"
void initDataSet5(vector< Image<float> > &V2, Point2D<int> &attentionPoint, float &k);

template <class T>
void drawHalfLine(Image<T>& dst,
                  const Point2D<int>& pos, float ori, float len, const T col,
                  const int rad = 1);

template <class T>
void flipVertical(Image<T> &img);


//Quick and dirty interface button
class button {
public:
  button() {
    text[0] = '\0';
    width = int(.5*OUTPUT_RES-.1*OUTPUT_RES);
    height = int(.5*width);
    rect=Rectangle(topLeft, Dims(width, height));
  }
  button(const char* t, Point2D<int> pos) {
    strncpy(text, t, sizeof(text)-1);
    text[sizeof(text)-1] = '\0';
    width = int(.5*OUTPUT_RES-.2*OUTPUT_RES);
    height = width/3;

    topLeft = pos;

    rect=Rectangle(topLeft, Dims(width, height));
  }

  bool inBounds(Point2D<int> click) {
    if(click.i > topLeft.i && click.i < topLeft.i+width && click.j > topLeft.j && click.j < topLeft.j+height)
      return true;
    else
      return false;
  }

  void draw(Image<PixRGB<byte> > &img) {
    drawRect(img, rect, PixRGB<byte>(255,0,0));
    writeText(img, topLeft+height/2-8, text, PixRGB<byte>(255,255,255), PixRGB<byte>(255,0,0), SimpleFont::FIXED(8), true);
  }

  char text[30];
  int width;
  int height;
  Point2D<int> topLeft;
  Rectangle rect;
};

int main(int argc, char* argv[]) {

  ModelManager manager("Border Ownership Modulator");

  //Start the model manager
  manager.start();

  //The Main Display Window
  rutz::shared_ptr<XWinManaged> window1;
  //rutz::shared_ptr<XWinManaged> window2;

  window1.reset(new XWinManaged(Dims(int(OUTPUT_RES*1.5),OUTPUT_RES), 0, 0, "Border Ownership"));
  //window2.reset(new XWinManaged(Dims(OUTPUT_RES*1.5,OUTPUT_RES), 0, 0, "Border Ownership"));

  //The precomputed Gaussian kernel, normalized to the range [0,1]
  Image<float> gaussianKernel = gaussian2D<float>(MOD_WEIGHT_WIDTH);
  normalizeFloat(gaussianKernel, FLOAT_NORM_0_255);
  gaussianKernel /= 255.0F;

  //An aggregation of V2 orientations showing the effects of attentional modulation
  Image< PixRGB<byte> > outputImage = Image< PixRGB<byte> >(Dims(OUTPUT_RES,OUTPUT_RES), ZEROS);
  Image< PixRGB<byte> > panelImage = Image< PixRGB<byte> >(Dims(OUTPUT_RES/2,OUTPUT_RES), ZEROS);

  //The V2 vector is an array of matrices that represents the hypercolumns in V2
  vector< Image<float> > V2;
  V2.resize(THETA_LEVELS, Image<float>(Dims(MAP_SIZE, MAP_SIZE), ZEROS));
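  //V2[theta_index] holds, at each (x,y), the response of the border-ownership cell
  //tuned to theta = theta_index/THETA_LEVELS * 2*PI at that map location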

  Point2D<int> attentionPoint; //The point of top down attention

  button but_Data1("Load Data Set 1", Point2D<int>(50, 30));
  button but_Data2("Load Data Set 2", Point2D<int>(50, 30+1*but_Data1.height));
  button but_Data3("Load Data Set 3", Point2D<int>(50, 30+2*but_Data1.height));
  button but_Data4("Load Data Set 4", Point2D<int>(50, 30+3*but_Data1.height));
  button but_Data5("Load Data Set 5", Point2D<int>(50, 30+4*but_Data1.height));
  button but_Quit("Quit",             Point2D<int>(50, OUTPUT_RES-but_Data1.height-10));
  but_Data1.draw(panelImage);
  but_Data2.draw(panelImage);
  but_Data3.draw(panelImage);
  but_Data4.draw(panelImage);
  but_Data5.draw(panelImage);
  but_Quit.draw(panelImage);

  float neuronStrength, modulationLevel, val;
  float k = 3;

  char key;

  initDataSet1(V2, attentionPoint, k);

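  //Main interaction loop: redraw the model with the current attention point,
  //then poll the window for key presses and mouse clicks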
  do {
    outputImage.clear();
    for(int theta_index = 0; theta_index < THETA_LEVELS; theta_index++) {
      for(int x = 0; x<MAP_SIZE; x++) {
        for(int y = 0; y<MAP_SIZE; y++) {
          float theta = float(theta_index)/float(THETA_LEVELS)*2.0F*PI;

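          //Modulation from the attention point for this cell, scaled by the
          //cell's activity and the per-data-set gain k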
          modulationLevel = M(gaussianKernel, theta, Point2D<int>(x,y), attentionPoint);
          neuronStrength = V2[theta_index].getVal(x,y);
          val = modulationLevel * neuronStrength * k;

          //Draw the original V2 state
          if(neuronStrength > 0) {
            drawHalfLine(outputImage, Point2D<int>(x*OUT_SCALE_FACT, y*OUT_SCALE_FACT), -theta, OUT_SCALE_FACT*3/4, PixRGB<byte>(75,75,75));
          }

          //Draw the attentional modulation vectors
          if(val > 0) {
            PixRGB<byte> curr = outputImage.getVal(x*OUT_SCALE_FACT,y*OUT_SCALE_FACT);

            drawHalfLine(outputImage, Point2D<int>(x*OUT_SCALE_FACT, y*OUT_SCALE_FACT), -(theta-PI/2.0f), val*10,
                         PixRGB<byte>(curr + PixRGB<byte>(255*val*BRIGHTNESS,255*val*BRIGHTNESS, 0)) );

            drawHalfLine(outputImage, Point2D<int>(x*OUT_SCALE_FACT, y*OUT_SCALE_FACT), theta, OUT_SCALE_FACT/2,
                         PixRGB<byte>(curr + PixRGB<byte>(255*val*BRIGHTNESS,255*val*BRIGHTNESS, 0)) );

            drawHalfLine(outputImage, Point2D<int>(x*OUT_SCALE_FACT, y*OUT_SCALE_FACT), -theta, OUT_SCALE_FACT/2,
                         PixRGB<byte>(curr + PixRGB<byte>(255*val*BRIGHTNESS,255*val*BRIGHTNESS, 0)) );
          }
        }
      }
    }
    /*
    Image<float> temp =  Image<float>(Dims(OUTPUT_RES, OUTPUT_RES), ZEROS);
    for(int i=0; i<MAP_SIZE; i++) {
      for(int j=0; j<MAP_SIZE; j++) {
        float val =  M(gaussianKernel, 0, Point2D<int>(MAP_SIZE/2,MAP_SIZE/2), Point2D<int>(i,j));
        temp.setVal(i*OUT_SCALE_FACT, j*OUT_SCALE_FACT, val);
      }
    }
    flipVertical(temp);
    window2->drawImage(temp,0,0);
    */

    //Draw the attention point as a red cross
    drawCross(outputImage, attentionPoint*OUT_SCALE_FACT, PixRGB<byte>(255,0,0));

    /*drawHalfLine(outputImage, attentionPoint*OUT_SCALE_FACT, PI, 30,
      PixRGB<byte>(PixRGB<byte>(255,255, 0)) );*/

    char buffer[20];
    snprintf(buffer, sizeof(buffer), "(%d, %d)", attentionPoint.i, attentionPoint.j);

    //Because image coordinates put y=0 at the top of the screen, the output is
    //flipped vertically so that y increases upward in the display
    flipVertical(outputImage);

    //Write the attention point coordinates
    writeText(outputImage, Point2D<int>(attentionPoint.i*OUT_SCALE_FACT+5, outputImage.getHeight() - attentionPoint.j*OUT_SCALE_FACT+15), buffer, PixRGB<byte>(255,0,0), PixRGB<byte>(0,0,0), SimpleFont::FIXED(6), false);

    //Draw the border ownership model and the interface panel into the display window
    window1->drawImage(outputImage,0,0);
    window1->drawImage(panelImage,outputImage.getWidth(),0);

    key = window1->getLastKeyPress();
    //These key codes may be OS dependent. If you want keyboard support, put a print statement here,
    //print out the keys as they are pressed, and map them to the correct actions.
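    //On the original test machine: codes -122..-125 move the attention point,
    //codes 26-29 and 31 load data sets 1-5, and code 20 (see the loop condition) quits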
    switch(key) {
    case -122:
      attentionPoint.j++;
      break;
    case -123:
      attentionPoint.j--;
      break;
    case -125:
      attentionPoint.i--;
      break;
    case -124:
      attentionPoint.i++;
      break;
    case 26:
      initDataSet1(V2, attentionPoint, k);
      break;
    case 27:
      initDataSet2(V2, attentionPoint, k);
      break;
    case 28:
      initDataSet3(V2, attentionPoint, k);
      break;
    case 29:
      initDataSet4(V2, attentionPoint, k);
      break;
    case 31:
      initDataSet5(V2, attentionPoint, k);
      break;
    }

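    //Mouse input: clicks on the right-hand panel trigger buttons; clicks on the
    //model view move the attention point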
    Point2D<int> p;
    p = window1->getLastMouseClick();
    if (p.isValid()) {
      //Check for button hits
      Point2D<int> panelPoint = Point2D<int>(p.i-OUTPUT_RES, p.j);
      if(but_Data1.inBounds(panelPoint)) {
        initDataSet1(V2, attentionPoint, k);
      }
      if(but_Data2.inBounds(panelPoint)) {
        initDataSet2(V2, attentionPoint, k);
      }
      if(but_Data3.inBounds(panelPoint)) {
        initDataSet3(V2, attentionPoint, k);
      }
      if(but_Data4.inBounds(panelPoint)) {
        initDataSet4(V2, attentionPoint, k);
      }
      if(but_Data5.inBounds(panelPoint)) {
        initDataSet5(V2, attentionPoint, k);
      }
      if(but_Quit.inBounds(panelPoint)) {
        LINFO("Quitting... Thanks!");
        exit(0);
      }
      //Otherwise, place the attention point on the mouse click,
      //converting from display pixels back to map coordinates and undoing the vertical flip
      else if(p.i < OUTPUT_RES) {
        p /= OUT_SCALE_FACT;
        p.j = MAP_SIZE-p.j;
        attentionPoint = p;
      }
    }
  } while(key != 20);

  return 0;
}

//Given a point i in an orientation layer theta of V2 and an attention point,
//find the top-down attentional modulation level of that point.
//This simulates the synaptic weighting between V2 and an attentional layer, whether top-down or bottom-up
float M(Image<float> WeightKernel, float theta, Point2D<int> i, Point2D<int> att) {

  if(i.distance(att) > WeightKernel.getWidth()/2 || i.distance(att) == 0)
    return 0.0F;

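  //theta_prime is the direction from cell i toward the attention point. Modulation is
  //zero if the attention point lies along the border direction (theta_prime equal to
  //theta, or within .01 of theta +/- PI) or anywhere in the half-plane from theta to
  //theta+PI; otherwise the cell is weighted by the Gaussian kernel as a function of its
  //offset from the attention point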
  float theta_prime = beta(i,att);

  if(theta_prime == theta ||
     (theta_prime >= theta + PI - .01 && theta_prime <= theta + PI + .01) ||
     (theta_prime >= theta - PI - .01 && theta_prime <= theta - PI + .01))
    return 0.0F;

  if((theta <= PI && theta_prime >= theta && theta_prime <= theta+PI) ||
     (theta > PI && (theta_prime >= theta || theta_prime < theta-PI)))
    return 0.0F;
  else
    return WeightKernel.getVal(WeightKernel.getWidth()/2 + abs(i.i - att.i), WeightKernel.getHeight()/2 + abs(i.j - att.j));
}


//Returns the angle from i to j, in radians, in the range [0, 2PI)
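//e.g. beta((0,0),(1,1)) == PI/4 and beta((0,0),(0,-1)) == 3PI/2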
float beta(Point2D<int> i, Point2D<int> j) {

  if(j.j-i.j >= 0)
    return atan2(float(j.j - i.j), float(j.i - i.i));
  else
    return 2.0f*PI + atan2(float(j.j - i.j), float(j.i - i.i));
}


//Flip an image vertically (about its x axis)
template <class T>
void flipVertical(Image<T> &img) {
  T temp;

  for(int x = 0; x < img.getWidth(); x++) {
    for(int y = 0; y < img.getHeight()/2; y++) {
      temp = img.getVal(x, img.getHeight() - y - 1);
      img.setVal(x, img.getHeight() - y - 1, img.getVal(x,y));
      img.setVal(x, y, temp);
    }
  }
}


//Initialize V2 with data set 1: A single vertical line with ambiguous ownership.
//The attention point starts to the right of the line, but the viewer should move it around to watch the model pick an owner
void initDataSet1(vector< Image<float> > &V2, Point2D<int> &attentionPoint, float &k) {
  int theta_index = THETA_LEVELS/4;
  int theta_index2 = THETA_LEVELS*3/4;
  Dims d = V2[0].getDims();

  k = 7.0f;

  //Clear out V2
  for(int i=0; i<THETA_LEVELS; i++)
    V2[i].clear();

  //Draw a 1 pixel line through the middle of the PI/2 layer of V2
  //This effectively simulates the activation of a right owned vertical line
  drawLine(V2[theta_index], Point2D<int>(d.w()/2,0), Point2D<int>(d.w()/2,d.h()), 1.0F);

  //Draw a 1 pixel line through the middle of the 3PI/2 layer of V2
  //This effectively simulates the activation of a left owned vertical line
  drawLine(V2[theta_index2], Point2D<int>(d.w()/2,0), Point2D<int>(d.w()/2,d.h()), 1.0F);

  //Put the attention point 5% to the right of the line
  attentionPoint = Point2D<int>(int(d.w()/2+d.w()*.05), d.h()/2);
}

//Initialize V2 with data set 2: A single diagonal (PI/4) line with ambiguous ownership.
//The attention point starts just above the line, but the viewer should move it around to watch the model pick an owner
void initDataSet2(vector< Image<float> > &V2, Point2D<int> &attentionPoint, float &k) {
  int theta_down_right = THETA_LEVELS/8;
  int theta_up_left = THETA_LEVELS*5/8;
  Dims d = V2[0].getDims();

  k = 7.0f;

  //Clear out V2
  for(int i=0; i<THETA_LEVELS; i++)
    V2[i].clear();

  //Draw a 1 pixel diagonal through the 5PI/4 layer of V2
  //This effectively simulates the activation of a line owned toward the upper left
  drawLine(V2[theta_up_left], Point2D<int>(0,0), Point2D<int>(d.w()-1,d.h()-1), 1.0F);

  //Draw a 1 pixel diagonal through the PI/4 layer of V2
  //This effectively simulates the activation of a line owned toward the lower right
  drawLine(V2[theta_down_right], Point2D<int>(0,0), Point2D<int>(d.w()-1,d.h()-1), 1.0F);

  //Put the attention point 5% above the center of the map
  attentionPoint = Point2D<int>(d.w()/2, int(d.h()/2+d.h()*.05));
}


//Initialize V2 with data set 3: An ambiguously owned square in the middle of the image
//The attention point starts at the center of the square, but the viewer should move it around to watch the model pick an owner
void initDataSet3(vector< Image<float> > &V2, Point2D<int> &attentionPoint, float &k) {

  Dims d = V2[0].getDims();

  int theta_down = 0;
  int theta_right = THETA_LEVELS/4;
  int theta_up = THETA_LEVELS/2;
  int theta_left = THETA_LEVELS*3/4;

  Point2D<int> topRight = Point2D<int>(d.w()*3/4, d.h()*3/4);
  Point2D<int> topLeft = Point2D<int>(d.w()/4, d.h()*3/4);
  Point2D<int> bottomRight = Point2D<int>(d.w()*3/4, d.h()/4);
  Point2D<int> bottomLeft = Point2D<int>(d.w()/4, d.h()/4);

  k = 7.0f;

  //Clear out V2
  for(int i=0; i<THETA_LEVELS; i++)
    V2[i].clear();

  //Draw the right line with bown to the right
  drawLine(V2[theta_right], bottomRight, topRight, 1.0F);
  //Draw the right line with bown to the left
  drawLine(V2[theta_left], bottomRight, topRight, 1.0F);

  //Draw the top line with bown up
  drawLine(V2[theta_up], topLeft, topRight, 1.0F);
  //Draw the top line with bown down
  drawLine(V2[theta_down], topLeft, topRight, 1.0F);

  //Draw the left line with bown right
  drawLine(V2[theta_right], topLeft, bottomLeft, 1.0F);
  //Draw the left line with bown left
  drawLine(V2[theta_left], topLeft, bottomLeft, 1.0F);

  //Draw the bottom line with bown up
  drawLine(V2[theta_up], bottomLeft, bottomRight, 1.0F);
  //Draw the bottom line with bown down
  drawLine(V2[theta_down], bottomLeft, bottomRight, 1.0F);

  attentionPoint = Point2D<int>(d.w()/2, d.h()/2);
}


//Initialize V2 with data set 4: A set of 5 ambiguously owned squares, sharing borders
void initDataSet4(vector< Image<float> > &V2, Point2D<int> &attentionPoint, float &k) {
  int theta_down = 0;
  int theta_right = THETA_LEVELS/4;
  int theta_up = THETA_LEVELS/2;
  int theta_left = THETA_LEVELS*3/4;

  Dims d = V2[0].getDims();

  k = 2.5f;

  //Clear out V2
  for(int i=0; i<THETA_LEVELS; i++)
    V2[i].clear();

  drawLine(V2[theta_up],   Point2D<int>(d.w()/8,d.h()*3/8), Point2D<int>(d.w()*7/8,d.h()*3/8), 1.0F);
  drawLine(V2[theta_down], Point2D<int>(d.w()/8,d.h()*3/8), Point2D<int>(d.w()*7/8,d.h()*3/8), 1.0F);

  drawLine(V2[theta_up],   Point2D<int>(d.w()/8,d.h()*5/8), Point2D<int>(d.w()*7/8,d.h()*5/8), 1.0F);
  drawLine(V2[theta_down], Point2D<int>(d.w()/8,d.h()*5/8), Point2D<int>(d.w()*7/8,d.h()*5/8), 1.0F);

  drawLine(V2[theta_left],  Point2D<int>(d.w()/8,d.h()*3/8), Point2D<int>(d.w()/8,d.h()*5/8), 1.0F);
  drawLine(V2[theta_right], Point2D<int>(d.w()/8,d.h()*3/8), Point2D<int>(d.w()/8,d.h()*5/8), 1.0F);

  drawLine(V2[theta_left],  Point2D<int>(d.w()*7/8,d.h()*3/8), Point2D<int>(d.w()*7/8,d.h()*5/8), 1.0F);
  drawLine(V2[theta_right], Point2D<int>(d.w()*7/8,d.h()*3/8), Point2D<int>(d.w()*7/8,d.h()*5/8), 1.0F);

  drawLine(V2[theta_left],  Point2D<int>(d.w()*3/8, d.h()/8), Point2D<int>(d.w()*3/8,d.h()*7/8), 1.0F);
  drawLine(V2[theta_right], Point2D<int>(d.w()*3/8, d.h()/8), Point2D<int>(d.w()*3/8,d.h()*7/8), 1.0F);

  drawLine(V2[theta_left],  Point2D<int>(d.w()*5/8, d.h()/8), Point2D<int>(d.w()*5/8,d.h()*7/8), 1.0F);
  drawLine(V2[theta_right], Point2D<int>(d.w()*5/8, d.h()/8), Point2D<int>(d.w()*5/8,d.h()*7/8), 1.0F);

  drawLine(V2[theta_up],   Point2D<int>(d.w()*3/8,d.h()/8), Point2D<int>(d.w()*5/8,d.h()/8), 1.0F);
  drawLine(V2[theta_down], Point2D<int>(d.w()*3/8,d.h()/8), Point2D<int>(d.w()*5/8,d.h()/8), 1.0F);

  drawLine(V2[theta_up],   Point2D<int>(d.w()*3/8,d.h()*7/8), Point2D<int>(d.w()*5/8,d.h()*7/8), 1.0F);
  drawLine(V2[theta_down], Point2D<int>(d.w()*3/8,d.h()*7/8), Point2D<int>(d.w()*5/8,d.h()*7/8), 1.0F);

  //Put the attention point just above the center of the middle square
  attentionPoint = Point2D<int>(d.w()/2, int(d.h()/2+d.h()*.05));
}



//Initialize V2 with data set 5: A rough imitation of a "Rubin's Vase"
void initDataSet5(vector< Image<float> > &V2, Point2D<int> &attentionPoint, float &k) {

  Dims d = V2[0].getDims();

  int x1  = d.w()/12;
  int x4  = d.w()/3;
  int x8  = d.w()*8/12;
  int x11 = d.w()*11/12;

  k = 4.0f;

  //Clear out V2
  for(int i=0; i<THETA_LEVELS; i++)
    V2[i].clear();

  drawLine(V2[THETA_LEVELS*3/8], Point2D<int>(x1,x11), Point2D<int>(x4,x8), 1.0F);
  drawLine(V2[THETA_LEVELS*7/8], Point2D<int>(x1,x11), Point2D<int>(x4,x8), 1.0F);

  drawLine(V2[THETA_LEVELS/4],   Point2D<int>(x4,x8), Point2D<int>(x4,x4), 1.0F);
  drawLine(V2[THETA_LEVELS*3/4], Point2D<int>(x4,x8), Point2D<int>(x4,x4), 1.0F);

  drawLine(V2[THETA_LEVELS/8],   Point2D<int>(x4,x4), Point2D<int>(x1,x1), 1.0F);
  drawLine(V2[THETA_LEVELS*5/8], Point2D<int>(x4,x4), Point2D<int>(x1,x1), 1.0F);

  drawLine(V2[THETA_LEVELS/8],   Point2D<int>(d.w()-x1,x11), Point2D<int>(d.w()-x4,x8), 1.0F);
  drawLine(V2[THETA_LEVELS*5/8], Point2D<int>(d.w()-x1,x11), Point2D<int>(d.w()-x4,x8), 1.0F);

  drawLine(V2[THETA_LEVELS/4],   Point2D<int>(d.w()-x4,x8), Point2D<int>(d.w()-x4,x4), 1.0F);
  drawLine(V2[THETA_LEVELS*3/4], Point2D<int>(d.w()-x4,x8), Point2D<int>(d.w()-x4,x4), 1.0F);

  drawLine(V2[THETA_LEVELS*3/8], Point2D<int>(d.w()-x4,x4), Point2D<int>(d.w()-x1,x1), 1.0F);
  drawLine(V2[THETA_LEVELS*7/8], Point2D<int>(d.w()-x4,x4), Point2D<int>(d.w()-x1,x1), 1.0F);
}


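//Draw a line segment of length len starting at pos in direction ori (radians).
//The j component is negated because image rows grow downward while ori follows
//the usual counter-clockwise math convention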
template <class T>
void drawHalfLine(Image<T>& dst,
                  const Point2D<int>& pos, float ori, float len, const T col,
                  const int rad)
{
  GVX_TRACE(__PRETTY_FUNCTION__);
  ASSERT(dst.initialized());

  int x1 = int(cos(ori)*len);
  int y1 = int(sin(ori)*len);

  Point2D<int> p1 = pos;
  Point2D<int> p2(pos.i+x1, pos.j-y1);

  drawLine(dst, p1, p2, col, rad);
}