11"""The LoadShedding component."""
22from __future__ import annotations
33
4- from datetime import datetime , timedelta , timezone
4+ from datetime import UTC , datetime , timedelta , timezone
55import logging
66from typing import Any
77
88from load_shedding .libs .sepush import SePush , SePushError
99from load_shedding .providers import Area , Stage
10+ import urllib3
1011
1112from homeassistant .config_entries import ConfigEntry
1213from homeassistant .const import (
@@ -186,7 +187,7 @@ def __init__(self, hass: HomeAssistant, sepush: SePush) -> None:
     async def _async_update_data(self) -> dict:
         """Retrieve latest load shedding data."""
 
-        now = datetime.now(datetime.UTC).replace(microsecond=0)
+        now = datetime.now(UTC).replace(microsecond=0)
         diff = 0
         if self.last_update is not None:
             diff = (now - self.last_update).seconds
@@ -196,6 +197,9 @@ async def _async_update_data(self) -> dict:
 
         try:
             stage = await self.async_update_stage()
+        except SePushError as err:
+            _LOGGER.error("Unable to get stage: %s", err)
+            self.data = {}
         except UpdateFailed as err:
             _LOGGER.exception("Unable to get stage: %s", err)
             self.data = {}
@@ -207,60 +211,55 @@ async def _async_update_data(self) -> dict:
 
     async def async_update_stage(self) -> dict:
         """Retrieve latest stage."""
-        now = datetime.now(datetime.UTC).replace(microsecond=0)
-        try:
-            esp = await self.hass.async_add_executor_job(self.sepush.status)
-        except SePushError as err:
-            raise UpdateFailed(err) from err
-        else:
-            data = {}
-            statuses = esp.get("status", {})
-            for idx, area in statuses.items():
-                stage = Stage(int(area.get("stage", "0")))
-                start_time = datetime.fromisoformat(area.get("stage_updated"))
+        now = datetime.now(UTC).replace(microsecond=0)
+        esp = await self.hass.async_add_executor_job(self.sepush.status)
+
+        data = {}
+        statuses = esp.get("status", {})
+        for idx, area in statuses.items():
+            stage = Stage(int(area.get("stage", "0")))
+            start_time = datetime.fromisoformat(area.get("stage_updated"))
+            start_time = start_time.replace(second=0, microsecond=0)
+            planned = [
+                {
+                    ATTR_STAGE: stage,
+                    ATTR_START_TIME: start_time.astimezone(UTC),
+                }
+            ]
+
+            next_stages = area.get("next_stages", [])
+            for i, next_stage in enumerate(next_stages):
+                # Prev
+                prev_end = datetime.fromisoformat(
+                    next_stage.get("stage_start_timestamp")
+                )
+                prev_end = prev_end.replace(second=0, microsecond=0)
+                planned[i][ATTR_END_TIME] = prev_end.astimezone(UTC)
+
+                # Next
+                stage = Stage(int(next_stage.get("stage", "0")))
+                start_time = datetime.fromisoformat(
+                    next_stage.get("stage_start_timestamp")
+                )
                 start_time = start_time.replace(second=0, microsecond=0)
-                planned = [
+                planned.append(
                     {
                         ATTR_STAGE: stage,
-                        ATTR_START_TIME: start_time.astimezone(datetime.UTC),
+                        ATTR_START_TIME: start_time.astimezone(UTC),
                     }
-                ]
-
-                next_stages = area.get("next_stages", [])
-                for i, next_stage in enumerate(next_stages):
-                    # Prev
-                    prev_end = datetime.fromisoformat(
-                        next_stage.get("stage_start_timestamp")
-                    )
-                    prev_end = prev_end.replace(second=0, microsecond=0)
-                    planned[i][ATTR_END_TIME] = prev_end.astimezone(datetime.UTC)
-
-                    # Next
-                    stage = Stage(int(next_stage.get("stage", "0")))
-                    start_time = datetime.fromisoformat(
-                        next_stage.get("stage_start_timestamp")
-                    )
-                    start_time = start_time.replace(second=0, microsecond=0)
-                    planned.append(
-                        {
-                            ATTR_STAGE: stage,
-                            ATTR_START_TIME: start_time.astimezone(datetime.UTC),
-                        }
-                    )
+                )
 
-                filtered = []
-                for stage in planned:
-                    if ATTR_END_TIME not in stage:
-                        stage[ATTR_END_TIME] = stage[ATTR_START_TIME] + timedelta(
-                            days=7
-                        )
-                    if ATTR_END_TIME in stage and stage.get(ATTR_END_TIME) >= now:
-                        filtered.append(stage)
+            filtered = []
+            for stage in planned:
+                if ATTR_END_TIME not in stage:
+                    stage[ATTR_END_TIME] = stage[ATTR_START_TIME] + timedelta(days=7)
+                if ATTR_END_TIME in stage and stage.get(ATTR_END_TIME) >= now:
+                    filtered.append(stage)
 
-                data[idx] = {
-                    ATTR_NAME: area.get("name", ""),
-                    ATTR_PLANNED: filtered,
-                }
+            data[idx] = {
+                ATTR_NAME: area.get("name", ""),
+                ATTR_PLANNED: filtered,
+            }
 
         return data
 
@@ -289,7 +288,7 @@ def add_area(self, area: Area = None) -> None:
     async def _async_update_data(self) -> dict:
         """Retrieve latest load shedding data."""
 
-        now = datetime.now(datetime.UTC).replace(microsecond=0)
+        now = datetime.now(UTC).replace(microsecond=0)
         diff = 0
         if self.last_update is not None:
             diff = (now - self.last_update).seconds
@@ -300,6 +299,9 @@ async def _async_update_data(self) -> dict:
 
         try:
             area = await self.async_update_area()
+        except SePushError as err:
+            _LOGGER.error("Unable to get area schedule: %s", err)
+            self.data = {}
         except UpdateFailed as err:
             _LOGGER.exception("Unable to get area schedule: %s", err)
             self.data = {}
@@ -315,10 +317,7 @@ async def async_update_area(self) -> dict:
         area_id_data: dict = {}
 
         for area in self.areas:
-            try:
-                esp = await self.hass.async_add_executor_job(self.sepush.area, area.id)
-            except SePushError as err:
-                raise UpdateFailed(err) from err
+            esp = await self.hass.async_add_executor_job(self.sepush.area, area.id)
 
             # Get events for area
             events = []
@@ -332,10 +331,8 @@ async def async_update_area(self) -> dict:
                 if note == str(Stage.LOAD_REDUCTION):
                     stage = Stage.LOAD_REDUCTION
 
-                start = datetime.fromisoformat(event.get("start")).astimezone(
-                    datetime.UTC
-                )
-                end = datetime.fromisoformat(event.get("end")).astimezone(datetime.UTC)
+                start = datetime.fromisoformat(event.get("start")).astimezone(UTC)
+                end = datetime.fromisoformat(event.get("end")).astimezone(UTC)
 
                 events.append(
                     {
@@ -478,7 +475,7 @@ def utc_dt(date: datetime, time: datetime) -> datetime:
         second=0,
         microsecond=0,
         tzinfo=sast,
-    ).astimezone(datetime.UTC)
+    ).astimezone(UTC)
 
 
 class LoadSheddingQuotaCoordinator(DataUpdateCoordinator[dict[str, Any]]):
@@ -494,9 +491,12 @@ def __init__(self, hass: HomeAssistant, sepush: SePush) -> None:
     async def _async_update_data(self) -> dict:
         """Retrieve latest load shedding data."""
 
-        now = datetime.now(datetime.UTC).replace(microsecond=0)
+        now = datetime.now(UTC).replace(microsecond=0)
         try:
             quota = await self.async_update_quota()
+        except SePushError as err:
+            _LOGGER.error("Unable to get quota: %s", err)
+            self.data = {}
         except UpdateFailed as err:
             _LOGGER.exception("Unable to get quota: %s", err)
         else:
@@ -507,10 +507,7 @@ async def _async_update_data(self) -> dict:
 
     async def async_update_quota(self) -> dict:
         """Retrieve latest quota."""
-        try:
-            esp = await self.hass.async_add_executor_job(self.sepush.check_allowance)
-        except SePushError as err:
-            raise UpdateFailed(err) from err
+        esp = await self.hass.async_add_executor_job(self.sepush.check_allowance)
 
         return esp.get("allowance", {})
 
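For reference, a minimal stand-alone sketch of the error-handling shape the coordinators switch to in this diff: the fetch call is left to raise SePushError itself, and the update routine catches it, logs it, and degrades to empty data instead of a helper wrapping the exception in UpdateFailed. The names fetch_status and update_data below are hypothetical stand-ins for illustration only, not part of this integration.

import asyncio
import logging

_LOGGER = logging.getLogger(__name__)


class SePushError(Exception):
    """Stand-in for load_shedding.libs.sepush.SePushError."""


async def fetch_status() -> dict:
    # Stand-in for: await self.hass.async_add_executor_job(self.sepush.status)
    raise SePushError("API unreachable")


async def update_data() -> dict:
    # Same pattern as the coordinators above: catch SePushError, log, fall back to {}.
    try:
        return await fetch_status()
    except SePushError as err:
        _LOGGER.error("Unable to get stage: %s", err)
        return {}


print(asyncio.run(update_data()))  # logs the error, then prints {}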