Python Forum
picamera not working on premade script
#1
I bought a robot kit for the Pi, and after assembly everything works fine except the FPV view from the picamera. I contacted the seller and they just sent me links and videos that weren't very detailed, so their customer service hasn't been much help; a friend and I have been working on this for a week with no luck.

My computer runs a Python file called client and the Pi runs one called server. The client script brings up the interface and everything seems to work, except the view from the camera is blank even though the camera's red light is on. I ran the stock picamera command to try to get a live view, with no luck, and I even swapped cameras in case the module itself was the problem. The command that checks whether the camera module is detected says it is, but the general picamera commands and functions don't work. The seller says those commands can't be used while the robot/interface script is running, and as soon as I power on the Pi it automatically starts that script and the robot initializes.
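If it helps, this is roughly the kind of standalone check I have been trying from Python (just a sketch, using the same legacy picamera library the kit's server script imports; the capture path is arbitrary). As far as I understand, only one process can hold the camera at a time, so if the auto-started robot script already has it open, this should fail with a picamera error instead of showing anything:

import time
import picamera

try:
    # Try to claim the camera; this fails if another process (for example the
    # auto-started server script) already has it open.
    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)
        camera.start_preview()   # live view on the Pi's own display
        time.sleep(5)
        camera.capture('/home/pi/picam_test.jpg')
        camera.stop_preview()
    print('Camera opened and captured OK')
except picamera.PiCameraError as err:
    print('Could not use the camera:', err)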

I will paste the camera script that the server runs below. If anyone has any ideas as to what I am doing wrong, please let me know.
#!/usr/bin/env python3
# File name   : server.py
# Description : for FPV video and OpenCV functions
# Website     : www.adeept.com
# E-mail      : [email protected]
# Author      : William(Based on Adrian Rosebrock's OpenCV code on pyimagesearch.com)
# Date        : 2018/08/22

import time
import threading
import cv2
import numpy as np  # needed for np.sqrt() in the tracking-trail drawing below
import zmq
import base64
import picamera
from picamera.array import PiRGBArray
import argparse
import imutils
from collections import deque
import psutil
import os
import servo
import PID
import LED
import datetime
from rpi_ws281x import *
import move

pid = PID.PID()
pid.SetKp(0.5)
pid.SetKd(0)
pid.SetKi(0)
Y_lock = 0
X_lock = 0
tor    = 17
FindColorMode = 0
WatchDogMode  = 0
UltraData = 3
LED  = LED.LED()

class FPV: 
    def __init__(self):
        self.frame_num = 0
        self.fps = 0

        self.colorUpper = (44, 255, 255)
        self.colorLower = (24, 100, 100)


    def SetIP(self,invar):
        self.IP = invar


    def FindColor(self,invar):
        global FindColorMode
        FindColorMode = invar
        if not FindColorMode:
            servo.camera_ang('home',0)


    def WatchDog(self,invar):
        global WatchDogMode
        WatchDogMode = invar


    def UltraData(self,invar):
        global UltraData
        UltraData = invar


    def capture_thread(self,IPinver):
        ap = argparse.ArgumentParser()            #OpenCV initialization
        ap.add_argument("-b", "--buffer", type=int, default=64,
            help="max buffer size")
        args = vars(ap.parse_args())
        pts = deque(maxlen=args["buffer"])

        font = cv2.FONT_HERSHEY_SIMPLEX

        camera = picamera.PiCamera() 
        camera.resolution = (640, 480)
        camera.framerate = 20
        rawCapture = PiRGBArray(camera, size=(640, 480))

        # Stream frames back to the client GUI: a ZMQ PUB socket pushes each
        # frame, JPEG-compressed and base64-encoded, to port 5555 on the client IP
        context = zmq.Context()
        footage_socket = context.socket(zmq.PUB)
        print(IPinver)
        footage_socket.connect('tcp://%s:5555'%IPinver)

        avg = None
        motionCounter = 0
        #time.sleep(4)
        lastMovtionCaptured = datetime.datetime.now()

        for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
            frame_image = frame.array
            cv2.line(frame_image,(300,240),(340,240),(128,255,128),1)
            cv2.line(frame_image,(320,220),(320,260),(128,255,128),1)
            timestamp = datetime.datetime.now()

            if FindColorMode:
                
                ####>>>OpenCV Start<<<####
                hsv = cv2.cvtColor(frame_image, cv2.COLOR_BGR2HSV)
                mask = cv2.inRange(hsv, self.colorLower, self.colorUpper)
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                    cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None
                if len(cnts) > 0:
                    cv2.putText(frame_image,'Target Detected',(40,60), font, 0.5,(255,255,255),1,cv2.LINE_AA)
                    c = max(cnts, key=cv2.contourArea)
                    ((x, y), radius) = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                    X = int(x)
                    Y = int(y)
                    if radius > 10:
                        cv2.rectangle(frame_image,(int(x-radius),int(y+radius)),(int(x+radius),int(y-radius)),(255,255,255),1)

                    if Y < (240-tor):
                        error = (240-Y)/5
                        outv = int(round((pid.GenOut(error)),0))
                        servo.camera_ang('lookup',outv)
                        Y_lock = 0
                    elif Y > (240+tor):
                        error = (Y-240)/5
                        outv = int(round((pid.GenOut(error)),0))
                        servo.camera_ang('lookdown',outv)
                        Y_lock = 0
                    else:
                        Y_lock = 1

                    
                    if X < (320-tor*3):
                        move.move(70, 'no', 'left', 0.6)
                        #time.sleep(0.1)
                        #move.motorStop()
                        X_lock = 0
                    elif X > (330+tor*3):
                        move.move(70, 'no', 'right', 0.6)
                        #time.sleep(0.1)
                        #move.motorStop()
                        X_lock = 0
                    else:
                        move.motorStop()
                        X_lock = 1

                    if X_lock == 1 and Y_lock == 1:
                        if UltraData > 0.5:
                            move.move(70, 'forward', 'no', 0.6)
                        elif UltraData < 0.4:
                            move.move(70, 'backward', 'no', 0.6)
                            print(UltraData)
                        else:
                            move.motorStop()
                    

                else:
                    cv2.putText(frame_image,'Target Detecting',(40,60), font, 0.5,(255,255,255),1,cv2.LINE_AA)
                    move.motorStop()

                for i in range(1, len(pts)):
                    if pts[i - 1] is None or pts[i] is None:
                        continue
                    thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
                    cv2.line(frame_image, pts[i - 1], pts[i], (0, 0, 255), thickness)
                ####>>>OpenCV Ends<<<####
                

            if WatchDogMode:
                gray = cv2.cvtColor(frame_image, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (21, 21), 0)

                if avg is None:
                    print("[INFO] starting background model...")
                    avg = gray.copy().astype("float")
                    rawCapture.truncate(0)
                    continue

                cv2.accumulateWeighted(gray, avg, 0.5)
                frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

                # threshold the delta image, dilate the thresholded image to fill
                # in holes, then find contours on thresholded image
                thresh = cv2.threshold(frameDelta, 5, 255,
                    cv2.THRESH_BINARY)[1]
                thresh = cv2.dilate(thresh, None, iterations=2)
                cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                    cv2.CHAIN_APPROX_SIMPLE)
                cnts = imutils.grab_contours(cnts)
                #print('x')
             
                # loop over the contours
                for c in cnts:
                    # if the contour is too small, ignore it
                    if cv2.contourArea(c) < 5000:
                        continue
             
                    # compute the bounding box for the contour, draw it on the frame,
                    # and update the text
                    (x, y, w, h) = cv2.boundingRect(c)
                    cv2.rectangle(frame_image, (x, y), (x + w, y + h), (128, 255, 0), 1)
                    text = "Occupied"
                    motionCounter += 1
                    #print(motionCounter)
                    #print(text)
                    LED.colorWipe(Color(255,16,0))
                    lastMovtionCaptured = timestamp

                if (timestamp - lastMovtionCaptured).seconds >= 0.5:
                    LED.colorWipe(Color(255,255,0))


            encoded, buffer = cv2.imencode('.jpg', frame_image)
            jpg_as_text = base64.b64encode(buffer)
            footage_socket.send(jpg_as_text)

            rawCapture.truncate(0)


if __name__ == '__main__':
    fpv = FPV()
    while True:
        fpv.capture_thread('192.168.0.110')
#2
Could you please share the link to this robot kit?
I think your best option is to flash an official Raspbian image and try to access the picamera from there.
If it still doesn't work on a clean image, it may be a hardware issue.
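To rule out a conflict with the kit's own software, stop the auto-started robot script first (it seems to hold the camera) and then run a minimal test like this (just a sketch, assuming the camera interface is enabled in raspi-config and the picamera library is installed):

import picamera

# Open the camera, report which sensor was detected, and grab one still frame.
with picamera.PiCamera() as camera:
    print('Camera detected, sensor revision:', camera.revision)
    camera.resolution = (640, 480)
    camera.capture('/tmp/picam_check.jpg')
print('Wrote /tmp/picam_check.jpg - if that image looks correct, the camera hardware is fine')

If that works on a fresh Raspbian image but not on the kit's image, the problem is in the software setup rather than the camera module itself.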