Skip to content

Commit

Permalink
FtcRobotController v8.0
Browse files Browse the repository at this point in the history
  • Loading branch information
CalKestis committed Sep 7, 2022
1 parent aba72e5 commit e0282fc
Show file tree
Hide file tree
Showing 24 changed files with 822 additions and 426 deletions.
5 changes: 2 additions & 3 deletions FtcRobotController/src/main/AndroidManifest.xml
Original file line number Diff line number Diff line change
@@ -1,15 +1,14 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools"
android:versionCode="45"
android:versionName="7.2">
android:versionCode="47"
android:versionName="8.0">

<uses-permission android:name="android.permission.RECEIVE_BOOT_COMPLETED" />

<application
android:allowBackup="true"
android:largeHeap="true"
android:extractNativeLibs="true"
android:icon="@drawable/ic_launcher"
android:label="@string/app_name"
android:theme="@style/AppThemeRedRC"
Expand Down
Empty file.
Original file line number Diff line number Diff line change
@@ -0,0 +1,142 @@
/* Copyright (c) 2022 FIRST. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted (subject to the limitations in the disclaimer below) provided that
* the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this list
* of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* Neither the name of FIRST nor the names of its contributors may be used to endorse or
* promote products derived from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

package org.firstinspires.ftc.robotcontroller.external.samples;

import com.qualcomm.robotcore.eventloop.opmode.Disabled;
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
import com.qualcomm.robotcore.util.Range;

/**
* This OpMode Sample illustrates how to use an external "hardware" class to modularize all the robot's sensors and actuators.
* This approach is very efficient because the same hardware class can be used by all of your teleop and autonomous OpModes
* without requiring many copy & paste operations. Once you have defined and tested the hardware class with one OpMode,
* it is instantly available to other OpModes.
*
* The real benefit of this approach is that as you tweak your robot hardware, you only need to make changes in ONE place (the Hardware Class).
* So, to be effective you should put as much of your hardware setup and access code as possible in the hardware class.
* Essentially anything you do with hardware in BOTH Teleop and Auto should likely go in the hardware class.
*
* The Hardware Class is created in a separate file, and then an "instance" of this class is created in each OpMode.
* In order for the class to do typical OpMode things (like send telemetry data) it must be passed a reference to the
* OpMode object when it's created, so it can access all core OpMode functions. This is illustrated below.
*
* In this concept sample, the hardware class file is called RobotHardware.java and it must accompany this sample OpMode.
* So, if you copy ConceptExternalHardwareClass.java into TeamCode (using Android Studio or OnBotJava) then RobotHardware.java
* must also be copied to the same location (maintaining its name).
*
* For comparison purposes, this sample and its accompanying hardware class duplicates the functionality of the
* RobotTeleopPOV_Linear OpMode. It assumes three motors (left_drive, right_drive and arm) and two servos (left_hand and right_hand)
*
* View the RobotHardware.java class file for more details
*
* Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
* Remove or comment out the @Disabled line to add this opmode to the Driver Station OpMode list
*
* In OnBot Java, add a new OpMode, drawing from this Sample; select TeleOp.
* Also add another new file named RobotHardware.java, drawing from the Sample with that name; select Not an OpMode.
*/

@TeleOp(name="Concept: Robot Hardware Class", group="Robot")
@Disabled
public class ConceptExternalHardwareClass extends LinearOpMode {

    // Single hardware-abstraction object; every device access is routed
    // through "robot." so device setup lives in one shared class.
    RobotHardware robot = new RobotHardware(this);

    @Override
    public void runOpMode() {
        double axial      = 0;   // fwd/back drive command
        double yaw        = 0;   // turn command
        double armPower   = 0;   // power applied to the arm motor
        double clawOffset = 0;   // accumulated servo offset for the hand

        // All hardware mapping/initialization is delegated to the hardware class.
        robot.init();

        // Block here until the driver presses PLAY.
        waitForStart();

        // Main control loop: runs until the driver presses STOP.
        while (opModeIsActive()) {

            // POV drive: left stick Y drives fwd/back (negated because the
            // joystick reads negative when pushed forward), right stick X turns.
            axial = -gamepad1.left_stick_y;
            yaw   = gamepad1.right_stick_x;

            // Blend drive and turn inside the hardware class.
            robot.driveRobot(axial, yaw);

            // Bumpers nudge the claw by HAND_SPEED per pass through the loop,
            // clamped to half of the full servo travel range.
            if (gamepad1.right_bumper) {
                clawOffset += robot.HAND_SPEED;
            } else if (gamepad1.left_bumper) {
                clawOffset -= robot.HAND_SPEED;
            }
            clawOffset = Range.clip(clawOffset, -0.5, 0.5);

            // Drive both hand servos to the new (mirrored) positions.
            robot.setHandPositions(clawOffset);

            // Y raises the arm, A lowers it; otherwise apply zero power.
            // Power constants come from the RobotHardware class.
            armPower = gamepad1.y ? robot.ARM_UP_POWER
                     : gamepad1.a ? robot.ARM_DOWN_POWER
                     : 0;
            robot.setArmPower(armPower);

            // Driver-station telemetry: control legend plus live status.
            telemetry.addData("Drive", "Left Stick");
            telemetry.addData("Turn", "Right Stick");
            telemetry.addData("Arm Up/Down", "Y & A Buttons");
            telemetry.addData("Hand Open/Closed", "Left and Right Bumpers");
            telemetry.addData("-", "-------");

            telemetry.addData("Drive Power", "%.2f", axial);
            telemetry.addData("Turn Power", "%.2f", yaw);
            telemetry.addData("Arm Power", "%.2f", armPower);
            telemetry.addData("Hand Position", "Offset = %.2f", clawOffset);
            telemetry.update();

            // Pace the loop so the hand servos step at a reasonable speed.
            sleep(50);
        }
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -46,21 +46,28 @@
Three scenarios are tested:
Cache Mode = OFF This is the normal default, where no cache is used, and every read produces a discrete transaction with
an expansion hub, which is the slowest approach.
an expansion hub, which is the slowest approach, but guarantees that the value is as fresh (recent) as possible.
Cache Mode = AUTO This mode will attempt to minimize the number of discrete read commands, by performing bulk-reads
and then returning values that have been cached. The cache is updated automatically whenever a specific read operation is repeated.
This mode will always return fresh data, but it may perform more bulk-reads than absolutely required.
Extra reads will be performed if multiple identical encoder/velocity reads are performed in one control cycle.
and then returning values that have been cached. The cache is updated automatically whenever any specific encoder is re-read.
This mode will always return new data, but it may perform more bulk-reads than absolutely required.
Extra reads will be performed if multiple encoder/velocity reads are performed on the same encoder in one control cycle.
This mode is a good compromise between the OFF and MANUAL modes.
Cache Mode = MANUAL This mode enables the user's code to determine the best time to refresh the cached bulk-read data.
Well organized code can place all the sensor reads in one location, and then just reset the cache once per control cycle.
The approach will produce the shortest cycle times, but it does require the user to manually clear the cache.
Note: If there are significant user-program delays between encoder reads, the cached value may not be fresh (recent).
You can issue a clearBulkCache() call at any time to force a fresh bulk-read on the next encoder read.
Cache Mode = MANUAL This mode requires the user's code to determine the best time to clear the cached bulk-read data.
Well organized code will reset the cache once at the beginning of the control cycle, and then immediately read and store all the encoder values.
This approach will produce the shortest cycle times, but it does require the user to manually clear the cache.
Since NO automatic Bulk-Reads are performed, neglecting to clear the bulk cache will result in the same values being returned
each time an encoder read is performed.
-------------------------------------
General tip to speed up your control cycles:
No matter what method you use to read encoders and other inputs, you should try to
avoid reading the same input multiple times around a control loop.
avoid reading the same encoder input multiple times around a control loop.
Under normal conditions, this will slow down the control loop.
The preferred method is to read all the required inputs ONCE at the beginning of the loop,
and save the values in variables that can be used by other parts of the control code.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,8 +40,8 @@
import org.firstinspires.ftc.robotcore.external.tfod.Recognition;

/**
* This 2020-2021 OpMode illustrates the basics of using the TensorFlow Object Detection API to
* determine the position of the Freight Frenzy game elements.
* This 2022-2023 OpMode illustrates the basics of using the TensorFlow Object Detection API to
* determine which image is being presented to the robot.
*
* Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
* Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
Expand All @@ -52,23 +52,21 @@
@TeleOp(name = "Concept: TensorFlow Object Detection", group = "Concept")
@Disabled
public class ConceptTensorFlowObjectDetection extends LinearOpMode {
/* Note: This sample uses the all-objects Tensor Flow model (FreightFrenzy_BCDM.tflite), which contains
* the following 4 detectable objects
* 0: Ball,
* 1: Cube,
* 2: Duck,
* 3: Marker (duck location tape marker)
*
* Two additional model assets are available which only contain a subset of the objects:
* FreightFrenzy_BC.tflite 0: Ball, 1: Cube
* FreightFrenzy_DM.tflite 0: Duck, 1: Marker
*/
private static final String TFOD_MODEL_ASSET = "FreightFrenzy_BCDM.tflite";

/*
* Specify the source for the Tensor Flow Model.
* If the TensorFlowLite object model is included in the Robot Controller App as an "asset",
* the OpMode must to load it using loadModelFromAsset(). However, if a team generated model
* has been downloaded to the Robot Controller's SD FLASH memory, it must to be loaded using loadModelFromFile()
* Here we assume it's an Asset. Also see method initTfod() below .
*/
private static final String TFOD_MODEL_ASSET = "PowerPlay.tflite";
// private static final String TFOD_MODEL_FILE = "/sdcard/FIRST/tflitemodels/CustomTeamModel.tflite";

private static final String[] LABELS = {
"Ball",
"Cube",
"Duck",
"Marker"
"1 Bolt",
"2 Bulb",
"3 Panel"
};

/*
Expand Down Expand Up @@ -114,11 +112,11 @@ public void runOpMode() {

// The TensorFlow software will scale the input images from the camera to a lower resolution.
// This can result in lower detection accuracy at longer distances (> 55cm or 22").
// If your target is at distance greater than 50 cm (20") you can adjust the magnification value
// If your target is at distance greater than 50 cm (20") you can increase the magnification value
// to artificially zoom in to the center of image. For best results, the "aspectRatio" argument
// should be set to the value of the images used to create the TensorFlow Object Detection model
// (typically 16/9).
tfod.setZoom(2.5, 16.0/9.0);
tfod.setZoom(1.0, 16.0/9.0);
}

/** Wait for the game to begin */
Expand All @@ -133,19 +131,22 @@ public void runOpMode() {
// the last time that call was made.
List<Recognition> updatedRecognitions = tfod.getUpdatedRecognitions();
if (updatedRecognitions != null) {
telemetry.addData("# Object Detected", updatedRecognitions.size());

// step through the list of recognitions and display boundary info.
int i = 0;
for (Recognition recognition : updatedRecognitions) {
telemetry.addData(String.format("label (%d)", i), recognition.getLabel());
telemetry.addData(String.format(" left,top (%d)", i), "%.03f , %.03f",
recognition.getLeft(), recognition.getTop());
telemetry.addData(String.format(" right,bottom (%d)", i), "%.03f , %.03f",
recognition.getRight(), recognition.getBottom());
i++;
}
telemetry.update();
telemetry.addData("# Objects Detected", updatedRecognitions.size());

// step through the list of recognitions and display image position/size information for each one
// Note: "Image number" refers to the randomized image orientation/number
for (Recognition recognition : updatedRecognitions) {
double col = (recognition.getLeft() + recognition.getRight()) / 2 ;
double row = (recognition.getTop() + recognition.getBottom()) / 2 ;
double width = Math.abs(recognition.getRight() - recognition.getLeft()) ;
double height = Math.abs(recognition.getTop() - recognition.getBottom()) ;

telemetry.addData(""," ");
telemetry.addData("Image", "%s (%.0f %% Conf.)", recognition.getLabel(), recognition.getConfidence() * 100 );
telemetry.addData("- Position (Row/Col)","%.0f / %.0f", row, col);
telemetry.addData("- Size (Width/Height)","%.0f / %.0f", width, height);
}
telemetry.update();
}
}
}
Expand All @@ -166,8 +167,6 @@ private void initVuforia() {

// Instantiate the Vuforia engine
vuforia = ClassFactory.getInstance().createVuforia(parameters);

// Loading trackables is not necessary for the TensorFlow Object Detection engine.
}

/**
Expand All @@ -177,10 +176,14 @@ private void initTfod() {
int tfodMonitorViewId = hardwareMap.appContext.getResources().getIdentifier(
"tfodMonitorViewId", "id", hardwareMap.appContext.getPackageName());
TFObjectDetector.Parameters tfodParameters = new TFObjectDetector.Parameters(tfodMonitorViewId);
tfodParameters.minResultConfidence = 0.8f;
tfodParameters.minResultConfidence = 0.75f;
tfodParameters.isModelTensorFlow2 = true;
tfodParameters.inputSize = 320;
tfodParameters.inputSize = 300;
tfod = ClassFactory.getInstance().createTFObjectDetector(tfodParameters, vuforia);

// Use loadModelFromAsset() if the TF Model is built in as an asset by Android Studio
// Use loadModelFromFile() if you have downloaded a custom team model to the Robot Controller's FLASH.
tfod.loadModelFromAsset(TFOD_MODEL_ASSET, LABELS);
// tfod.loadModelFromFile(TFOD_MODEL_FILE, LABELS);
}
}
Loading

0 comments on commit e0282fc

Please sign in to comment.