|
| 1 | +from typing import Any |
| 2 | + |
1 | 3 | import aiohttp |
2 | 4 | from aiolimiter import AsyncLimiter |
3 | | -from typing import Any |
4 | | -from .teslafleetapi import TeslaFleetApi |
5 | | -from .charging import Charging |
6 | | -from .energy import Energy |
7 | | -from .partner import Partner |
8 | | -from .user import User |
9 | | -from .vehicle import Vehicle |
10 | | -from .vehiclespecific import VehicleSpecific |
11 | 5 |
|
12 | | -from .const import Method, LOGGER, Scope |
| 6 | +from .const import LOGGER, Method, VehicleDataEndpoint |
| 7 | +from .teslafleetapi import TeslaFleetApi |
13 | 8 |
|
14 | 9 | # Rate limit should be global, even if multiple instances are created |
15 | 10 | rate_limit = AsyncLimiter(5, 10) |
@@ -101,6 +96,22 @@ async def server_side_polling( |
101 | 96 | ) |
102 | 97 | ).get("response") |
103 | 98 |
|
async def vehicle_force_refresh(self, vin: str) -> dict[str, Any]:
    """Ask the service to force-refresh the vehicle's data.

    Issues a GET against the force endpoint for the given VIN and
    returns the decoded JSON response from `_request`.
    """
    path = f"api/force/{vin}"
    return await self._request(Method.GET, path)
async def vehicle_data_cached(
    self,
    vin: str,
    endpoints: list[VehicleDataEndpoint | str] | None = None,
) -> dict[str, Any]:
    """Return the service's cached vehicle data for the given VIN.

    When *endpoints* is provided, the entries are joined into the
    semicolon-separated value the API expects; otherwise the filter
    value sent is None.
    """
    if endpoints:
        joined = ";".join(endpoints)
    else:
        joined = None
    return await self._request(
        Method.GET,
        f"api/x/vehicles/{vin}/vehicle_data",
        {"endpoints": joined},
    )
104 | 115 | async def _request( |
105 | 116 | self, |
106 | 117 | method: Method, |
|
0 commit comments