Archives mensuelles : mai 2015


TFT display and Aria G25

 

I got a TJCTM24024 TFT LCD working on the Aria G25:
http://www.elecfreaks.com/store/24-tft-lcd-tft0124sp-p-785.html

The connections are as follows:
LCD     ARIA

Vin     5V
GND     GND
LED     3.3V
CS      PA14
RESET   PA22
DC      PA23
MOSI    PA12
MISO    PA11
CLK     PA13

Most instructions come from http://www.acmesystems.it/arietta_adafruit_lcd28

The part of acme-aria.dts that concerns the TFT display is:

spi0: spi@f0000000 {
	status = "okay";
	cs-gpios = <&pioA 14 0>;

	device@0 {
		/* rotate = ;*/

		fps = ;
		compatible = "fb_ili9341";
		spi-max-frequency = ;
		reg = <0>;
		/* regwidth = ;*/
		buswidth = ;
		verbose = ;
		reset-gpios = <&pioA 22 0>; /* PA22 – Karim */
		dc-gpios = <&pioA 23 0>; /* PA23 – Karim */
	};
};
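
For reference, here is what a complete fb_ili9341 node looks like with the angle-bracket values filled in; the numbers below are typical fbtft values, given only as an example, not necessarily the exact ones of this setup:

spi0: spi@f0000000 {
	status = "okay";
	cs-gpios = <&pioA 14 0>;

	device@0 {
		compatible = "fb_ili9341";
		reg = <0>;                       /* chip select 0 */
		spi-max-frequency = <32000000>;  /* SPI clock */
		buswidth = <8>;
		fps = <25>;
		rotate = <90>;
		verbose = <3>;
		reset-gpios = <&pioA 22 0>;      /* PA22 */
		dc-gpios = <&pioA 23 0>;         /* PA23 */
	};
};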

 

I added a USB audio device to connect headphones, and I compiled the kernel following http://en.gnublin.org/index.php/Audio

root@acmeboard:~# mplayer -framedrop -ao alsa -cache 8192 -vo fbdev -vfm ffmpeg
-lavdopts lowres=1:fast:skiploopfilter=all perunpugnodidollari.mp4
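
If the display or the sound does not work, a quick way to check that both devices were detected (nothing specific to this board, just standard commands):

root@acmeboard:~# dmesg | grep fb_ili9341   # the fbtft driver logs a line when the display is registered
root@acmeboard:~# ls /dev/fb*               # the TFT should appear as a framebuffer device
root@acmeboard:~# aplay -l                  # the USB audio device should be listed by ALSA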

 

 

The laser hits a small moving target

Prototype for a mosquito zapper made with:
– 405 nm laser diode from a Blu-ray burner + power supply (DANGEROUS !!!)
– galvanometers to move the laser
– webcam
– DAC + op-amps
– USB/SPI converters
– relay to switch the laser on/off
– Linux computer + OpenCV software
I still have to test it with mosquitoes 🙂 (and safety glasses)
In the video, a small piece of black paper hanging on a nylon wire is moving.

The main Python code is below:

'''


O----------------------> X (up to 1280)
|
|
|        
|           L
|
|                     C
|
Y 
(up to 720)

L laser (Xl,Yl)
C target (Xc,Yc)

Depx=Xc-Xl
Depy=Yc-Yl



'''


import math,sys,os,cv,time
import Queue,threading
import numpy as np
import signal

USE_LASER=True

DETECT_VISAGE=False

CAMERA=1 # camera index: 0 = internal cam, 1 = external cam

CONF='1280x720'

# Size and color parameters
SIZETOKEEP=400

# color to ignore, so the laser spot itself is not tracked (HSV)
H=100
S=10
V=255

USB2SPI='/home/karim/perso/elec/moustiques/soft/WRITEf '
LASER='/home/karim/perso/elec/moustiques/soft/laser '
LED='/home/karim/perso/elec/moustiques/soft/led '
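
# External helper programs (the paths above are specific to my machine):
#   WRITEf <channel> <value> : send a DAC value to the galvo driver over the
#                              USB/SPI bridge (channel 0 = X, channel 1 = Y)
#   laser 0|1                : switch the laser relay off/on
#   led 0|1                  : switch the LED off/on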

if CONF == '1280x720' :
    # violet
    # laser position at the center of the image
    Xl_center=641
    Yl_center=354
    ADCy_center=1137
    ADCx_center=1466

    # image extremes that the laser can reach
    ### y
    Yl_maxhaut=170
    ADCy_maxhaut=1
    Yl_minbas=713
    ADCy_minbas=2937

    ### x
    Xl_mingauche=268
    ADCx_mingauche=4081
    Xl_maxdroite=853
    ADCx_maxdroite=2
else:
    # violet
    # laser position at the center of the image
    Xl_center=641
    Yl_center=360
    ADCy_center=1052
    ADCx_center=1409

    # image extremes that the laser can reach
    ### y
    Yl_maxhaut=162
    ADCy_maxhaut=2
    Yl_minbas=699
    ADCy_minbas=2835

    ### x
    Xl_mingauche=243
    ADCx_mingauche=4080
    Xl_maxdroite=854
    ADCx_maxdroite=1

# ADC counts per pixel in X
pasx=math.fabs(1.0*(ADCx_maxdroite-ADCx_mingauche)/(Xl_maxdroite-Xl_mingauche))
#print ADCx_maxdroite-ADCx_mingauche,Xl_maxdroite-Xl_mingauche
#print 'pasx=',pasx

def move_x(Xp):
    ADCp=ADCx_center-(Xp-Xl_center)*pasx
    #print 'ADCp=',ADCp
    com=USB2SPI+" 0 "+str(int(ADCp))
    #print com
    res=os.system(com)
    if res != 0 :
        print 'Can not run '+com
        sys.exit(1)

# ADC counts per pixel in Y
pasy=math.fabs(1.0*(ADCy_minbas-ADCy_maxhaut)/(Yl_minbas-Yl_maxhaut))

def move_y(Yp):
    ADCp=ADCy_center+(Yp-Yl_center)*pasy
    #print 'ADCp=',ADCp
    com=USB2SPI+" 1 "+str(int(ADCp))
    #print com
    res=os.system(com)
    if res != 0 :
        print 'Can not run '+com
        sys.exit(1)
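
# Worked example of the pixel -> DAC mapping (using the 1280x720 calibration
# above, numbers rounded), just to illustrate what move_x() does:
#   pasx = |2 - 4081| / |853 - 268| = 4079 / 585 ~= 6.97 DAC counts per pixel
#   move_x(641) -> 1466 - (641 - 641) * pasx = 1466   (laser stays at the image center)
#   move_x(741) -> 1466 - (741 - 641) * pasx ~= 769   (100 px to the right)
# move_y() does the same on the other DAC channel, with a + sign because the
# DAC value increases towards the bottom of the image.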

def inside(p, r): # p in rect
    (rx, ry), (rw, rh) = r
    (px, py) = p
    print 'checking if',p,'is inside',r
    return px > rx and py > ry and rx + rw > px  and ry + rh > py 
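    # e.g. inside((5, 5), ((0, 0), (10, 10))) -> True : the point (5,5) lies in the
    # 10x10 rectangle whose top-left corner is (0,0); the rect format ((x,y),(w,h))
    # matches how the face rectangles are built from HaarDetectObjects below.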
	
#
# BBoxes must be in the format:
# ( (topleft_x, topleft_y), (bottomright_x, bottomright_y) )
top = 0
bottom = 1
left = 0
right = 1

def merge_collided_bboxes( bbox_list ):
	# For every bbox...
	for this_bbox in bbox_list:
		
		# Collision detect every other bbox:
		for other_bbox in bbox_list:
			if this_bbox is other_bbox: continue  # Skip self
			
			# Assume a collision to start out with:
			has_collision = True
			
			# These coords are in screen coords, so > means 
			# "lower than" and "further right than".  And < 
			# means "higher than" and "further left than".
			
			# We also inflate the box size by 10% to deal with
			# fuzziness in the data.  (Without this, there are many times a bbox
			# is short of overlap by just one or two pixels.)
			if (this_bbox[bottom][0]*1.1 < other_bbox[top][0]*0.9): has_collision = False
			if (this_bbox[top][0]*.9 > other_bbox[bottom][0]*1.1): has_collision = False
			
			if (this_bbox[right][1]*1.1 < other_bbox[left][1]*0.9): has_collision = False
			if (this_bbox[left][1]*0.9 > other_bbox[right][1]*1.1): has_collision = False
			
			if has_collision:
				# merge these two bboxes into one, then start over:
				top_left_x = min( this_bbox[left][0], other_bbox[left][0] )
				top_left_y = min( this_bbox[left][1], other_bbox[left][1] )
				bottom_right_x = max( this_bbox[right][0], other_bbox[right][0] )
				bottom_right_y = max( this_bbox[right][1], other_bbox[right][1] )
				
				new_bbox = ( (top_left_x, top_left_y), (bottom_right_x, bottom_right_y) )
				
				bbox_list.remove( this_bbox )
				bbox_list.remove( other_bbox )
				bbox_list.append( new_bbox )
				
				# Start over with the new list:
				return merge_collided_bboxes( bbox_list )
	
	# When there are no collisions between boxes, return that list:
	return bbox_list
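
# e.g. merge_collided_bboxes( [ ((10,10),(50,50)), ((45,45),(90,90)) ] )
# returns [ ((10,10),(90,90)) ] : the two overlapping boxes are merged into
# their common bounding box, while disjoint boxes are left untouched.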

class WorkerDraw(threading.Thread):

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):

        while True:
            point = self.queue.get()
            print '=========> point =',point
            
            x,y=point['point'][0],point['point'][1]

            # move laser
            move_x(x)
            move_y(y)
       
            # switch on
            com=LASER+' 1'
            res=os.system(com)
            #res=0
            if res != 0 :
                print 'Can not run '+com
                sys.exit(1)

            time.sleep(0) # dwell time with the laser on (currently zero)

            # switch off
            com=LASER+' 0'
            res=os.system(com)
            #res=0
            if res != 0 :
                print 'Can not run '+com
                sys.exit(1)
                
            self.queue.task_done()

class Target:
    def __init__(self):
        self.capture = cv.CaptureFromCAM(CAMERA)

        if CONF == '1280x720':
            cv.SetCaptureProperty(self.capture,cv.CV_CAP_PROP_FRAME_WIDTH,1280)
            cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 720)
            cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FPS, 60)
        time.sleep(2)

        cv.NamedWindow("Target", cv.CV_WINDOW_AUTOSIZE) # second argument is the window flag, not the camera index

    def detect_faces(self, image, haar_cascade, mem_storage ):

        faces = []
        image_size = cv.GetSize( image )
        # HaarDetectObjects returns ((x, y, w, h), n) tuples: the x,y location of
        # the top-left point and the width, height of the bounding box
        #faces = cv.HaarDetectObjects(grayscale, haar_cascade, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, (20, 20) )
        #faces = cv.HaarDetectObjects(image, haar_cascade, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING )
        #faces = cv.HaarDetectObjects(image, haar_cascade, storage )
        #faces = cv.HaarDetectObjects(image, haar_cascade, mem_storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, ( 16, 16 ) )
        #faces = cv.HaarDetectObjects(image, haar_cascade, mem_storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, ( 4,4 ) )
        faces = cv.HaarDetectObjects(image, haar_cascade, mem_storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, ( image_size[0]/10, image_size[1]/10) )

        for face in faces:
            box = face[0]
            cv.Rectangle(image, ( box[0], box[1] ),
                ( box[0] + box[2], box[1] + box[3]), cv.RGB(124, 252, 0), 1, 8, 0)
        return faces


    def run(self):

        out_queue=Queue.Queue()

        if DETECT_VISAGE == True :
            # ## Face detection stuff
            haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_default.xml' )
            # visage
            #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt.xml' )
            # haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt2.xml' )
            # haar_cascade = cv.Load( 'haarcascades/haarcascade_mcs_mouth.xml' )
            #haar_cascade = cv.Load( 'haarcascades/haarcascade_eye.xml' )
            # haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt_tree.xml' )
            # haar_cascade = cv.Load( 'haarcascades/haarcascade_upperbody.xml' )
            # haar_cascade = cv.Load( 'haarcascades/haarcascade_profileface.xml' )
		

        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
     
        print 'width=',frame.width,',height=',frame.height,',depth=',frame.depth

        #sys.exit(0)

        frame_size = cv.GetSize(frame)

        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
        difference = None

        t = WorkerDraw(out_queue)
        t.start()

        prev_x=0
        prev_y=0

        t0= time.time()
        Process=False
        
        # loop over images
        while True:

            onFace=False
            
            # Capture frame from webcam
            color_image = cv.QueryFrame(self.capture)
 
            ### cam limits
            pt1=(Xl_mingauche,Yl_maxhaut)
            pt2=(Xl_maxdroite,Yl_minbas)
            cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)

            t1 = time.time()

            #######################################
            hsv_img = cv.CreateImage(cv.GetSize(color_image), 8, 3)
            cv.CvtColor(color_image, hsv_img, cv.CV_BGR2HSV)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)
            if not difference:
                #print 'no diff'
                # Initialize
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
            else:
                #print 'diff'
                cv.RunningAvg(color_image, moving_average, 0.020, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            # Dilate and erode to get object blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            # Calculate movements
            storage = cv.CreateMemStorage(0)

            if DETECT_VISAGE == True :
                # detect objects
                faces=self.detect_faces( color_image, haar_cascade, storage )
            

            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            points = []   # Was using this to hold either pixel coords or polygon coords.
            bounding_box_list = []
            while contour:	
                bounding_rect = cv.BoundingRect( list(contour) )
                point1 = ( bounding_rect[0], bounding_rect[1] )
                point2 = ( bounding_rect[0] + bounding_rect[2], bounding_rect[1] + bounding_rect[3] )
	
                bounding_box_list.append( ( point1, point2 ) )
                polygon_points = cv.ApproxPoly( list(contour), storage, cv.CV_POLY_APPROX_DP )
				
 
                contour = contour.h_next()
                                
			
            # Find the average size of the bboxes (targets), then
            # remove any tiny bboxes (which are probably just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
            box_areas = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][0] - box[top][0]
                box_areas.append( box_width * box_height )
				
                #cv.Rectangle( color_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)
			
            average_box_area = 0.0
            if len(box_areas): 
                average_box_area = float( sum(box_areas) ) / len(box_areas)
			
            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][0] - box[top][0]
				
                # Only keep the box if it's not a tiny noise box:

                if math.fabs((box_width * box_height)-SIZETOKEEP) > 200 :
                    print 'ignore size'
                else:
                    print 'size=',box_width * box_height
                
                if (box_width * box_height) > average_box_area*0.1 and math.fabs((box_width * box_height)-SIZETOKEEP) < 200 : 
                #if (box_width * box_height) > average_box_area*0.1  : 
                    trimmed_box_list.append( box )
			
                # Draw the trimmed box list:
                #for box in trimmed_box_list:
                #    cv.Rectangle( color_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )
				
            bounding_box_list = merge_collided_bboxes( trimmed_box_list )

            # Draw the merged box list:
            center_points=[]
            for box in bounding_box_list:
                #cv.Rectangle( color_image, box[0], box[1], cv.CV_RGB(0,255,0), 1 )
                x=int((box[0][0]+box[1][0])/2.0)
                y=int((box[0][1]+box[1][1])/2.0)
                center_point=(x,y)
                #print center_point
                center_points.append( center_point )
                #cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
                #cv.Circle(color_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
                #cv.Circle(color_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
                #cv.Circle(color_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)

                (h,s,v,r)=cv.Get2D(hsv_img,y,x)

                if DETECT_VISAGE == True :
                    p=(x,y)
                    for f in faces:
                        r=((f[0][0],f[0][1]),(f[0][2],f[0][3]))
                        if inside(p,r):
                            print 'Point inside face !!!!!!!'
                            onFace=True

                if t1-t0 > 30 and Process == False: # 30 s warm-up time before processing starts
                    Process=True
                    print 'Processing !'

                if  x < Xl_mingauche or  x > Xl_maxdroite or y > Yl_minbas or y < Yl_maxhaut :
                    print 'ignore bounds'

                if math.fabs(H-h) < 50 and math.fabs(S-s) < 50 :
                    print 'ignore color'
 
                if out_queue.empty()  and onFace==False and Process == True  and USE_LASER == True and (math.fabs(H-h) > 50 or math.fabs(S-s) > 50 ) and (x > Xl_mingauche and  x < Xl_maxdroite and y < Yl_minbas and y > Yl_maxhaut):
                #if out_queue.empty() and Process == True and (x > Xl_mingauche and  x < Xl_maxdroite and y < Yl_minbas and y > Yl_maxhaut) :
                    print 'ADDED','x=',x,'y=',y,'H=',h,'S=',s,'V=',v
                    out_queue.put({'point': (x,y)})
                    #cv.SaveImage('target_x'+str(x)+'_y'+str(y)+'_H'+str(h)+'_S'+str(s)+'_V'+str(v)+'.jpg',color_image)
                    cv.Rectangle( color_image, box[0], box[1], cv.CV_RGB(0,255,0), 1)
                    cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
                    cv.Circle(color_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
                    cv.Circle(color_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
                    cv.Circle(color_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)

            estimated_target_count = len( bounding_box_list )
            print 'estimated_target_count=',estimated_target_count
            
            # Display frame to user
            
            cv.ShowImage("Target", color_image)
            
            # Listen for ESC or ENTER key
            # ### the image window must have keyboard focus
            c = cv.WaitKey(1) % 0x100
            if c == 27 or c == 10:
                com=LASER+' 0'
                res=os.system(com)
                if res != 0 :
                    print 'Can not run '+com
                com=LED+' 0'
                res=os.system(com)
                if res != 0 :
                    print 'Can not run '+com
                os.kill(os.getpid(), signal.SIGKILL)
                #break

if __name__=="__main__":
    
    com=LED+' 1'
    res=os.system(com)
    if res != 0 :
        print 'Can not run '+com

    # start
    t = Target()
    #import cProfile
    #cProfile.run( 't.run()' )
    t.run()
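
To try it (the file name zapper.py below is just an example), run the script with Python 2 on the computer connected to the webcam and the USB/SPI board:

python zapper.py

Pressing ESC or ENTER in the "Target" window switches the laser and LED off and exits.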