"""Tracking

Détecte un objet en mouvement (caméra sur RPI), prend une photo et l'envoie par mail.
"""

import math,sys,os,cv2,time
import Queue,threading
import numpy as np
import signal
import datetime
import commands


import smtplib
import datetime,traceback

from email import Encoders
from email.MIMEText import MIMEText 
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart

import imutils

def sendMail(tfrom, to, subject,messhtml,tosenddir,filename):
    print 'in sendMail'

    try:
        server = smtplib.SMTP("smtp.gmail.com", 587)
        server.ehlo()
        server.starttls()
        server.ehlo()
        server.login("xxxx", "xxx")
        mail = MIMEMultipart()
        mail['From'] = tfrom
        mail['Subject'] = subject
        mail['To'] = to
        part2 = MIMEText(messhtml, 'html')
        mail.attach(part2)

        # now attach the file
        if filename != '':
            fileMsg = MIMEBase('application','octet-stream')
            fileMsg.set_payload(file(tosenddir+'/'+filename).read())
            Encoders.encode_base64(fileMsg)
            fileMsg.add_header('Content-Disposition', 'attachment; filename="'+filename+'"')
            mail.attach(fileMsg)
        server.sendmail(tfrom, [to], mail.as_string())
    except :
        print traceback.print_exc()
        return 1
    return 0

CAMERA=0 # 0 = internal camera, 1 = external camera


# Save an annotated JPEG for every retained detection.
WRITE_JPEG=True
# Mail each saved JPEG via sendMail().
SENDMAIL=False
# Requested capture resolution (only 1280x720 triggers explicit cam setup).
CONF='1280x720'
#CONF='640x480'
# Minimum delay (seconds) between two saved images.
TIMEBETWEENIMGSECS=2
# True = headless (no OpenCV windows shown).
BACKGROUND=True

# Size, colors parameters
# Minimum merged-box area (pixels) for a detection to be recorded.
SIZETOKEEP=5000

# Minimum squared distance from the previous detection centre.
DIST=400 # 200

# True when running on a Raspberry Pi (enables the GPIO 26 indicator).
RPI=True

if RPI :
    import RPi.GPIO as GPIO
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(26,GPIO.OUT)




# BBoxes must be in the format:
# ( (topleft_x), (topleft_y) ), ( (bottomright_x), (bottomright_y) ) )
# NOTE(review): top/left both alias index 0 and bottom/right both alias
# index 1 -- merge_collided_bboxes historically mixed them up freely.
top = 0
bottom = 1
left = 0
right = 1


def merge_collided_bboxes( bbox_list ):
  """Fuse every pair of overlapping boxes until none collide.

  Boxes are ((topleft_x, topleft_y), (bottomright_x, bottomright_y))
  tuples in screen coordinates.  Before the overlap test each coordinate
  is inflated/deflated by 10% so boxes missing overlap by a pixel or two
  still merge (note: this scales about the origin, so the tolerance
  grows with the coordinate value -- kept as-is from the original).

  The list is modified in place and also returned.

  Fix vs. the original: iterative restart instead of one recursive call
  per merge (a crowded frame could previously hit the recursion limit),
  and local index literals instead of the misleading module-level
  top/bottom/left/right aliases (which indexed x with "top/bottom" and
  y with "left/right").
  """
  restart = True
  while restart:
    restart = False
    # Compare every box against every other box:
    for this_bbox in bbox_list:
      for other_bbox in bbox_list:
        if this_bbox is other_bbox: continue  # Skip self

        # Screen coords: > means "further down/right", < "further up/left".
        # Boxes are disjoint when one ends (x or y) before the other starts.
        no_overlap = (
            this_bbox[1][0] * 1.1 < other_bbox[0][0] * 0.9 or
            this_bbox[0][0] * 0.9 > other_bbox[1][0] * 1.1 or
            this_bbox[1][1] * 1.1 < other_bbox[0][1] * 0.9 or
            this_bbox[0][1] * 0.9 > other_bbox[1][1] * 1.1
        )
        if no_overlap:
          continue

        # Merge the colliding pair into their bounding union...
        new_bbox = (
            ( min( this_bbox[0][0], other_bbox[0][0] ),
              min( this_bbox[0][1], other_bbox[0][1] ) ),
            ( max( this_bbox[1][0], other_bbox[1][0] ),
              max( this_bbox[1][1], other_bbox[1][1] ) ),
        )
        bbox_list.remove( this_bbox )
        bbox_list.remove( other_bbox )
        bbox_list.append( new_bbox )

        # ...then rescan the whole list from scratch.
        restart = True
        break
      if restart:
        break

  # No collisions left:
  return bbox_list


class WorkerDraw(threading.Thread):

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):

        while True:
            point = self.queue.get()
            print '=========> point =',point
            

            self.queue.task_done()

class Target:
    def __init__(self):
        self.capture = cv2.VideoCapture(CAMERA)

        if CONF == '1280x720':
            print 'OK set'
            self.capture.set(cv2.CAP_PROP_FRAME_WIDTH,1280)
            self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
            self.capture.set(cv2.CAP_PROP_FPS, 25)
        time.sleep(2)

        w = self.capture.get(cv2.CAP_PROP_FRAME_WIDTH)
        h = self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
        print w,h

        if BACKGROUND == False:
            cv2.namedWindow("Target", CAMERA)
            

    def run(self):

        out_queue=Queue.Queue(maxsize=1)
       
        t = WorkerDraw(out_queue)
        t.start()

        prev_x=0
        prev_y=0

        t0= time.time()
        t0_img= time.time()
        Process=False


        # initialize the first frame in the video stream
        firstFrame = None
        
        # loop over images
        while True:

             
                # Capture frame from webcam
                rval,color_image = self.capture.read()

                hsv = cv2.cvtColor(color_image,cv2.COLOR_BGR2HSV)

                if not rval :
                    print 'Pb cam'
                    break
                
                t1 = time.time()
                t1_img = time.time()

                if t1-t0 > 30 and Process == False: # 30s le temps de chauffer
                   Process=True
                   print 'Processing !'

                com="df -h / | awk '{print $5}' | grep -v Uti | sed 's/%//g'"
                res,out=commands.getstatusoutput(com)
                print out
                if int(out) > 80 :
                   print 'Not enough disk space !'
                   Process=False

                # resize the frame, convert it to grayscale, and blur it
          #color_image = imutils.resize(color_image, width=500) # commente par KB le 27/05/2018
          gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
          gray = cv2.GaussianBlur(gray, (21, 21), 0)
 
          # if the first frame is None, initialize it
          if firstFrame is None:
        firstFrame = gray
        continue

                # compute the absolute difference between the current frame and
          # first frame
                frameDelta = cv2.absdiff(firstFrame, gray)
          thresh = cv2.threshold(frameDelta, 15, 255, cv2.THRESH_BINARY)[1]
                
                if BACKGROUND == False :
                    cv2.imshow("diff",thresh)
                
          # dilate the thresholded image to fill in holes, then find contours
          # on thresholded image
          thresh = cv2.dilate(thresh, None, iterations=2)
          contour= cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
               
                # a commenter / decommenter 
                firstFrame=gray
                
                #######################################
                bounding_box_list = []

                for c in contour[1]:
                    bounding_rect = cv2.boundingRect( c )
                    point1 = ( bounding_rect[0], bounding_rect[1] )
                    point2 = ( bounding_rect[0] + bounding_rect[2], bounding_rect[1] + bounding_rect[3] )
                    bounding_box_list.append( ( point1, point2 ) )


                    # Find the average size of the bbox (targets), then
                    # remove any tiny bboxes (which are prolly just noise).
                    # "Tiny" is defined as any box with 1/10th the area of the average box.
                    # This reduces false positives on tiny "sparkles" noise.
                box_areas = []
                for box in bounding_box_list:
                    box_width = box[right][0] - box[left][0]
                    box_height = box[bottom][0] - box[top][0]
                    box_areas.append( box_width * box_height )		
                    #cv2.rectangle( color_image, box[0], box[1], [255,0,0], 1)

                average_box_area = 0.0
                if len(box_areas): 
                    average_box_area = float( sum(box_areas) ) / len(box_areas)

                trimmed_box_list = []
                for box in bounding_box_list:
                    box_width = box[right][0] - box[left][0]
                    box_height = box[bottom][0] - box[top][0]

                    # Only keep the box if it's not a tiny noise box:
                    print 'size=',box_width * box_height
                    if (box_width * box_height)-SIZETOKEEP > 200 : 
                        print 'ignore size'

                    #if (box_width * box_height) > average_box_area*0.1 and math.fabs((box_width * box_height)-SIZETOKEEP) < 200 : 
                    #if (box_width * box_height) > average_box_area*0.1  : 
                    trimmed_box_list.append( box )

                # Draw the trimmed box list:
                for box in trimmed_box_list:
                    # jaune
                    cv2.rectangle( color_image, box[0], box[1], [0,255,255], 2 )

                '''
                trimmed_box_list2=[]
                for box in trimmed_box_list:
                    box_width = box[1][0] - box[0][0]
                    box_height = box[1][1] - box[0][1]
                    print 'tbox=',(box[0][0],box[0][1],box_width,box_height)
                    trimmed_box_list2.append((box[0][0],box[0][1],box_width,box_height))

                trimmed_box_list2b=[]
                for b in trimmed_box_list2:
                    trimmed_box_list2b.append(b)
                
                trimmed_box_list2b,weights = cv2.groupRectangles(trimmed_box_list2b,1)
                '''
                bounding_box_list = merge_collided_bboxes( trimmed_box_list )

                print bounding_box_list,len(bounding_box_list)
                
                print 'estimated_target_count=',len(bounding_box_list)
                if Process == True :
                   print 'Processing !'

                # Draw the merged box list:
                for box in bounding_box_list:
                    #vert
                    #cv2.rectangle( color_image, (box[0],box[1]),(box[0]+box[2],box[1]+box[3]), [0,255,0], 1 ) # box pas bons
                    cv2.rectangle( color_image, box[0], box[1], [0,255,0], 1 )              
                    #x=int(box[0]+box[2]/2.0)
                    #y=int(box[1]+box[3]/2.0)
                    x=int((box[0][0]+box[1][0])/2.0)
                    y=int((box[0][1]+box[1][1])/2.0)
                    center_point=(x,y)
                    #print center_point

                    center_point=(x,y)
                   
                    box_width = box[1][0] - box[0][0]
                    box_height = box[1][1] - box[0][1]
                    #box_width = box[2] - box[0]
                    #box_height = box[3] - box[1]
                    
                    thesize=box_height*box_width 
                    print 'thesize=',thesize

                    dist=(x-prev_x)*(x-prev_x)+(y-prev_y)*(y-prev_y)
                    print "dist=",dist
                    prev_x=x
                    prev_y=y
                    print 'hsv x,y=',x,y
                    px=hsv[y,x]
                    print 'px=',px
                    h=px[0]
                    s=px[1]
                    v=px[2]

        if thesize >SIZETOKEEP and out_queue.empty() and dist>DIST  and Process == True :
        #if Process==True:
                        print 'ADDED','x=',x,'y=',y

                        print 'thesize=',thesize
                        out_queue.put({'point': (x,y)})
                        #cv2.rectangle( color_image, (box[0],box[1]), (box[0]+box[2],box[1]+box[3]), [0,255,0], 1) #vert
                        cv2.rectangle( color_image, box[0], box[1], [0,255,0], 1)
                        cv2.circle(color_image, center_point, 20, [255, 255,255], 1)
                        cv2.circle(color_image, center_point, 15, [100, 255, 255], 1)
                        cv2.circle(color_image, center_point, 10, [255, 255, 255], 2)
                        cv2.circle(color_image, center_point, 5, [100, 255, 255], 3)
                        if WRITE_JPEG and t1_img-t0_img > TIMEBETWEENIMGSECS:
                            if RPI:
                                GPIO.output(26, GPIO.HIGH)
                                
                            t0_img=t1_img
                            filename='target_'+datetime.datetime.now().strftime("%Y%m%d%H%M%S")+'_x'+str(x)+'_y'+str(y)+'_thesize'+str(thesize)+'.jpg'
                            cv2.imwrite(filename,color_image)
                            if SENDMAIL == True:
                                res=sendMail('adresse mail expediteur', 'adresse mail de destination', 'mous','','/home/karim/perso/moustiques/',filename)
                                print res
                            if RPI:
                                GPIO.output(26, GPIO.LOW)
                        
                # Display frame to user
                if BACKGROUND == False:   
                    cv2.imshow("Target", color_image)

                # Listen for ESC or ENTER key
                # ### il faut avoir le focus sur l image
                c = cv2.waitKey(1) % 0x100
                if c == 27 or c == 10:
                    os.kill(os.getpid(), signal.SIGKILL)
                   
if __name__=="__main__":

    # Build the tracker and enter its endless capture loop.
    # (Wrap the run() call in cProfile.run(...) to profile.)
    tracker = Target()
    tracker.run()