feat(single-mode): recognize aoharu soul
NateScarlet committed Aug 30, 2021
1 parent b01d706 commit 6131303
Showing 11 changed files with 111 additions and 15 deletions.
6 changes: 6 additions & 0 deletions auto_derby/imagetools.py
@@ -194,6 +194,12 @@ def bg_mask_by_outline(outline_img: np.ndarray) -> np.ndarray:
return border_flood_fill(outline_img)


def inside_outline(img: np.ndarray, outline_img: np.ndarray) -> np.ndarray:
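# Keep only the part of img enclosed by outline_img: flood-fill the background from the border, then mask it away.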
bg_mask = border_flood_fill(outline_img)
fg_mask = 255 - bg_mask
return cv2.copyTo(img, fg_mask)


def resize(
img: Image,
*,
@@ -1 +1 @@
Training<lv=1 sta=13 gut=4 ski=4 ptn=spd@1,oth@2^>
Training<lv=1 sta=13 gut=4 ski=4 ptn=spd@1[0%],sta@2[0%]>
@@ -1 +1 @@
Training<lv=1 gut=10 pow=8 spd=6 ski=5 ptn=spd@1^,oth@1^>
Training<lv=1 gut=10 pow=8 spd=6 ski=5 ptn=spd@1^[0%],pow@1^[0%]>
@@ -1 +1 @@
Training<lv=1 spd=12 pow=5 ski=2>
Training<lv=1 spd=12 pow=5 ski=2 ptn=tm^[0%]>
@@ -0,0 +1 @@
Training<lv=1 wis=10 spd=5 ski=4 ptn=frd@2,spd[0%],tm[0%]>
@@ -0,0 +1 @@
Training<lv=1 sta=9 gut=4 ski=2 ptn=oth,tm@2[16%]>
101 changes: 95 additions & 6 deletions auto_derby/single_mode/training/partner.py
@@ -125,6 +125,52 @@ def _recognize_level(rp: mathtools.ResizeProxy, icon_img: Image) -> int:
return -1


def _recognize_soul(img: Image) -> float:
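# Estimate how full the partner's Aoharu soul gauge is, as a ratio in [0, 1]; returns -1 when no gauge is detected.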
img = imagetools.resize(img, height=40)
soul_img = imagetools.cv_image(img)
blue_outline_img = imagetools.constant_color_key(
soul_img,
(251, 109, 0),
(255, 178, 99),
threshold=0.6,
)
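# Strip everything outside the blue gauge outline.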
soul_img = imagetools.inside_outline(soul_img, blue_outline_img)
sharpened_img = imagetools.mix(imagetools.sharpen(soul_img, 1), soul_img, 0.5)
white_outline_img = imagetools.constant_color_key(
sharpened_img,
(255, 255, 255),
(252, 251, 251),
(248, 227, 159),
threshold=0.9,
)
bg_mask = imagetools.border_flood_fill(white_outline_img)
fg_mask = 255 - bg_mask
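# Drop small (< 100 px) specks from the foreground mask.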
imagetools.fill_area(fg_mask, (0,), size_lt=100)
fg_img = cv2.copyTo(soul_img, fg_mask)
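# The grey key (126, 121, 121) is assumed to match the unfilled portion of the gauge.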
empty_mask = imagetools.constant_color_key(fg_img, (126, 121, 121))
if os.getenv("DEBUG") == __name__:
_LOGGER.debug(
"soul: img=%s",
imagetools.image_hash(img, save_path=g.image_path),
)
cv2.imshow("soul_img", soul_img)
cv2.imshow("sharpened_img", shapened_img)
cv2.imshow("blue_outline_img", blue_outline_img)
cv2.imshow("white_outline_img", white_outline_img)
cv2.imshow("bg_mask", bg_mask)
cv2.imshow("fg_mask", fg_mask)
cv2.imshow("empty_mask", empty_mask)
cv2.waitKey()
cv2.destroyAllWindows()

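# Soul value: 1 minus the ratio of empty (grey) area to gauge area; outline_avg discounts the outline's share of the foreground, and the result is clamped to [0, 1].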
fg_avg = np.average(fg_mask)
if fg_avg < 100:
return -1
empty_avg = np.average(empty_mask)
outline_avg = 45
return max(0, min(1, 1 - (empty_avg / (fg_avg - outline_avg))))


class Partner:
TYPE_SPEED: int = 1
TYPE_STAMINA: int = 2
@@ -133,18 +179,20 @@ class Partner:
TYPE_WISDOM: int = 5
TYPE_FRIEND: int = 6
TYPE_OTHER: int = 7
TYPE_TEAMMATE: int = 8

def __init__(self):
self.level = 0
self.type = 0
self.has_hint = False
self.has_training = False
self.soul = -1
self.icon_bbox = (0, 0, 0, 0)

def __str__(self):
return (
f"Partner<type={self.type_text(self.type)} lv={self.level} "
f"hint={self.has_hint} training={self.has_training} icon_bbox={self.icon_bbox}>)"
f"hint={self.has_hint} training={self.has_training} soul={self.soul} icon_bbox={self.icon_bbox}>)"
)

def score(self, ctx: Context) -> float:
@@ -177,14 +225,26 @@ def type_text(v: int) -> Text:
Partner.TYPE_WISDOM: "wis",
Partner.TYPE_FRIEND: "frd",
Partner.TYPE_OTHER: "oth",
Partner.TYPE_TEAMMATE: "tm",
}.get(v, f"unknown({v})")

@classmethod
def _from_training_scene_icon(
cls, img: Image, bbox: Tuple[int, int, int, int]
cls, ctx: Context, img: Image, bbox: Tuple[int, int, int, int]
) -> Optional[Partner]:
rp = mathtools.ResizeProxy(img.width)
icon_img = img.crop(bbox)
soul = -1
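# In the Aoharu scenario the soul gauge sits just left of the partner icon; crop it relative to the icon bbox.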
if ctx.scenario == ctx.SCENARIO_AOHARU:
soul_bbox = (
bbox[0] - rp.vector(35, 540),
bbox[1] + rp.vector(33, 540),
bbox[0] + rp.vector(2, 540),
bbox[3] - rp.vector(0, 540),
)
soul_img = img.crop(soul_bbox)
soul = _recognize_soul(soul_img)

if os.getenv("DEBUG") == __name__:
_LOGGER.debug(
"icon: img=%s",
@@ -194,26 +254,43 @@ def _from_training_scene_icon(
cv2.waitKey()
cv2.destroyAllWindows()
level = _recognize_level(rp, icon_img)
if level < 0:
if level < 0 and soul < 0:
return None
self = cls.new()
self.icon_bbox = bbox
self.level = level
self.soul = soul
self.has_hint = _recognize_has_hint(rp, icon_img)
self.has_training = _recognize_has_training(rp, icon_img)
self.type = _recognize_type_color(rp, icon_img)
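# An Aoharu partner with a soul gauge but no support-type color is a teammate.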
if soul >= 0 and self.type == Partner.TYPE_OTHER:
self.type = Partner.TYPE_TEAMMATE
_LOGGER.debug("partner: %s", self)
return self

@classmethod
def from_training_scene(cls, img: Image) -> Iterator[Partner]:
ctx = Context()
ctx.scenario = ctx.SCENARIO_URA
return cls.from_training_scene_v2(ctx, img)

@classmethod
def from_training_scene_v2(cls, ctx: Context, img: Image) -> Iterator[Partner]:
rp = mathtools.ResizeProxy(img.width)

icon_bbox = rp.vector4((448, 146, 516, 220), 540)
icon_y_offset = rp.vector(90, 540)
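# Icon layout differs per scenario: Aoharu icons start 1px lower and are spaced 4px tighter vertically.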
icon_bbox, icon_y_offset = {
ctx.SCENARIO_URA: (
rp.vector4((448, 146, 516, 220), 540),
rp.vector(90, 540),
),
ctx.SCENARIO_AOHARU: (
rp.vector4((448, 147, 516, 220), 540),
rp.vector(86, 540),
),
}[ctx.scenario]
icons_bottom = rp.vector(578, 540)
while icon_bbox[2] < icons_bottom:
v = cls._from_training_scene_icon(img, icon_bbox)
v = cls._from_training_scene_icon(ctx, img, icon_bbox)
if not v:
break
yield v
@@ -224,5 +301,17 @@ def from_training_scene(cls, img: Image) -> Iterator[Partner]:
icon_bbox[3] + icon_y_offset,
)

def to_short_text(self):
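# e.g. "spd@1!^[50%]": type, level (@), hint (!), has training (^), soul gauge fill ([..%]).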
ret = self.type_text(self.type)
if self.level > 0:
ret += f"@{self.level}"
if self.has_hint:
ret += "!"
if self.has_training:
ret += "^"
if self.soul >= 0:
ret += f"[{round(self.soul * 100)}%]"
return ret


g.partner_class = Partner
7 changes: 2 additions & 5 deletions auto_derby/single_mode/training/training.py
@@ -306,7 +306,7 @@ def from_training_scene_v2(
self._use_estimate_vitality = True
# TODO: recognize failure rate
self._use_estimate_failure_rate = True
self.partners = tuple(Partner.from_training_scene(img))
self.partners = tuple(Partner.from_training_scene_v2(ctx, img))
return self

def __str__(self):
Expand All @@ -319,10 +319,7 @@ def __str__(self):
("wis", self.wisdom),
("ski", self.skill),
)
partner_text = ",".join(
f"{i.type_text(i.type)}@{i.level}{'!' if i.has_hint else ''}{'^' if i.has_training else ''}"
for i in self.partners
)
partner_text = ",".join(i.to_short_text() for i in self.partners)
return (
"Training<"
f"lv={self.level} "
(Binary files changed; previews not available.)
4 changes: 3 additions & 1 deletion scripts/recognize_training_scene.py
@@ -44,7 +44,9 @@ def main():
"aoharu": Context.SCENARIO_AOHARU,
}.get(args.scenario, args.scenario)
image = imagetools.resize(PIL.Image.open(image_path), width=template.TARGET_WIDTH)
training = single_mode.Training.from_training_scene(image, scenario=scenario)
ctx = Context.new()
ctx.scenario = scenario
training = single_mode.Training.from_training_scene_v2(ctx, image)
print(training)

