Skip to content

Commit 86c4d1f

Browse files
committed
kill deprecated TimeSeriesReadingData
1 parent 154cc7b commit 86c4d1f

File tree

3 files changed

+99
-177
lines changed

3 files changed

+99
-177
lines changed

src/main/java/edu/ie3/datamodel/io/connectors/CsvFileConnector.java

Lines changed: 55 additions & 121 deletions
Original file line numberDiff line numberDiff line change
@@ -221,70 +221,27 @@ public Optional<IndividualTimeSeriesMetaInformation> getIndividualTimeSeriesMeta
221221
}
222222

223223
/**
224-
* Initialises the readers for time series with the specified column schemes. They are given back
225-
* grouped by the column scheme in order to allow for accounting the different content types.
224+
 * Receive the meta information for specific time series. They are given back grouped by the column
225+
* scheme in order to allow for accounting the different content types.
226226
*
227227
 * @param columnSchemes the column schemes to restrict the lookup to. If no scheme is given, all
228228
 * available time series files are considered.
229-
* @return A mapping from column type to respective readers
230-
* @deprecated Don't use {@link TimeSeriesReadingData}, as it contains a reader, that might not be
231-
* closed
229+
* @return A mapping from column scheme to the individual time series meta information
232230
*/
233-
@Deprecated
234-
public Map<ColumnScheme, Set<TimeSeriesReadingData>> initTimeSeriesReader(
235-
ColumnScheme... columnSchemes) {
231+
public Map<ColumnScheme, Set<CsvIndividualTimeSeriesMetaInformation>>
232+
getCsvIndividualTimeSeriesMetaInformation(ColumnScheme... columnSchemes) {
236233
return getIndividualTimeSeriesFilePaths()
237234
.parallelStream()
238235
.map(
239236
pathString -> {
240237
String filePathWithoutEnding = removeFileEnding(pathString);
241-
return buildReadingData(filePathWithoutEnding, columnSchemes);
238+
return buildCsvTimeSeriesMetaInformation(filePathWithoutEnding, columnSchemes);
242239
})
243240
.filter(Optional::isPresent)
244241
.map(Optional::get)
245-
.collect(Collectors.groupingBy(TimeSeriesReadingData::getColumnScheme, Collectors.toSet()));
246-
}
247-
248-
/**
249-
* Initialises a reader to get grip on the file that contains mapping information between
250-
* coordinate id and actual coordinate
251-
*
252-
* @return A {@link BufferedReader}
253-
* @throws FileNotFoundException If the file is not present
254-
*/
255-
public BufferedReader initIdCoordinateReader() throws FileNotFoundException {
256-
String filePath = entityPersistenceNamingStrategy.getIdCoordinateEntityName();
257-
return initReader(filePath);
258-
}
259-
260-
/**
261-
* Returns a set of relative paths strings to time series files, with respect to the base folder
262-
* path
263-
*
264-
* @return A set of relative paths to time series files, with respect to the base folder path
265-
*/
266-
private Set<String> getIndividualTimeSeriesFilePaths() {
267-
Path baseDirectoryPath =
268-
Paths.get(
269-
FilenameUtils.getFullPath(baseDirectoryName)
270-
+ FilenameUtils.getName(baseDirectoryName));
271-
try (Stream<Path> pathStream = Files.walk(baseDirectoryPath)) {
272-
return pathStream
273-
.map(baseDirectoryPath::relativize)
274-
.filter(
275-
path -> {
276-
String withoutEnding = removeFileEnding(path.toString());
277-
return entityPersistenceNamingStrategy
278-
.getIndividualTimeSeriesPattern()
279-
.matcher(withoutEnding)
280-
.matches();
281-
})
282-
.map(Path::toString)
283-
.collect(Collectors.toSet());
284-
} catch (IOException e) {
285-
log.error("Unable to determine time series files readers for time series.", e);
286-
return Collections.emptySet();
287-
}
242+
.collect(
243+
Collectors.groupingBy(
244+
CsvIndividualTimeSeriesMetaInformation::getColumnScheme, Collectors.toSet()));
288245
}
289246

290247
/**
@@ -295,12 +252,9 @@ private Set<String> getIndividualTimeSeriesFilePaths() {
295252
* @param filePathString String describing the path to the time series file
296253
* @param columnSchemes the allowed column schemes. If no scheme is specified, all schemes are
297254
* allowed.
298-
* @return An {@link Optional} to {@link TimeSeriesReadingData}
299-
* @deprecated Don't use {@link TimeSeriesReadingData}, as it contains a reader, that might not be
300-
* closed
255+
* @return An {@link Optional} to {@link IndividualTimeSeriesMetaInformation}
301256
*/
302-
@Deprecated
303-
private Optional<TimeSeriesReadingData> buildReadingData(
257+
private Optional<CsvIndividualTimeSeriesMetaInformation> buildCsvTimeSeriesMetaInformation(
304258
String filePathString, ColumnScheme... columnSchemes) {
305259
try {
306260
FileNameMetaInformation metaInformation =
@@ -327,23 +281,60 @@ private Optional<TimeSeriesReadingData> buildReadingData(
327281
columnSchemes);
328282
return Optional.empty();
329283
}
330-
331-
BufferedReader reader = initReader(filePathString);
332284
return Optional.of(
333-
new TimeSeriesReadingData(
285+
new CsvIndividualTimeSeriesMetaInformation(
334286
individualMetaInformation.getUuid(),
335287
individualMetaInformation.getColumnScheme(),
336-
reader));
337-
} catch (FileNotFoundException e) {
338-
log.error("Cannot init the writer for time series file path '{}'.", filePathString, e);
339-
return Optional.empty();
288+
filePathString));
340289
} catch (IllegalArgumentException e) {
341290
log.error(
342291
"Error during extraction of meta information from file name '{}'.", filePathString, e);
343292
return Optional.empty();
344293
}
345294
}
346295

296+
/**
297+
* Initialises a reader to get grip on the file that contains mapping information between
298+
* coordinate id and actual coordinate
299+
*
300+
* @return A {@link BufferedReader}
301+
* @throws FileNotFoundException If the file is not present
302+
*/
303+
public BufferedReader initIdCoordinateReader() throws FileNotFoundException {
304+
String filePath = entityPersistenceNamingStrategy.getIdCoordinateEntityName();
305+
return initReader(filePath);
306+
}
307+
308+
/**
309+
* Returns a set of relative paths strings to time series files, with respect to the base folder
310+
* path
311+
*
312+
* @return A set of relative paths to time series files, with respect to the base folder path
313+
*/
314+
private Set<String> getIndividualTimeSeriesFilePaths() {
315+
Path baseDirectoryPath =
316+
Paths.get(
317+
FilenameUtils.getFullPath(baseDirectoryName)
318+
+ FilenameUtils.getName(baseDirectoryName));
319+
try (Stream<Path> pathStream = Files.walk(baseDirectoryPath)) {
320+
return pathStream
321+
.map(baseDirectoryPath::relativize)
322+
.filter(
323+
path -> {
324+
String withoutEnding = removeFileEnding(path.toString());
325+
return entityPersistenceNamingStrategy
326+
.getIndividualTimeSeriesPattern()
327+
.matcher(withoutEnding)
328+
.matches();
329+
})
330+
.map(Path::toString)
331+
.collect(Collectors.toSet());
332+
} catch (IOException e) {
333+
log.error("Unable to determine time series files readers for time series.", e);
334+
return Collections.emptySet();
335+
}
336+
}
337+
347338
/**
348339
* Removes the file ending from input string
349340
*
@@ -414,63 +405,6 @@ public void shutdown() {
414405
});
415406
}
416407

417-
/**
418-
* Class to bundle all information, that are necessary to read a single time series
419-
*
420-
* @deprecated Use the {@link CsvIndividualTimeSeriesMetaInformation} and build reader on demand
421-
*/
422-
@Deprecated
423-
public static class TimeSeriesReadingData {
424-
private final UUID uuid;
425-
private final ColumnScheme columnScheme;
426-
private final BufferedReader reader;
427-
428-
public TimeSeriesReadingData(UUID uuid, ColumnScheme columnScheme, BufferedReader reader) {
429-
this.uuid = uuid;
430-
this.columnScheme = columnScheme;
431-
this.reader = reader;
432-
}
433-
434-
public UUID getUuid() {
435-
return uuid;
436-
}
437-
438-
public ColumnScheme getColumnScheme() {
439-
return columnScheme;
440-
}
441-
442-
public BufferedReader getReader() {
443-
return reader;
444-
}
445-
446-
@Override
447-
public boolean equals(Object o) {
448-
if (this == o) return true;
449-
if (!(o instanceof TimeSeriesReadingData)) return false;
450-
TimeSeriesReadingData that = (TimeSeriesReadingData) o;
451-
return uuid.equals(that.uuid)
452-
&& columnScheme == that.columnScheme
453-
&& reader.equals(that.reader);
454-
}
455-
456-
@Override
457-
public int hashCode() {
458-
return Objects.hash(uuid, columnScheme, reader);
459-
}
460-
461-
@Override
462-
public String toString() {
463-
return "TimeSeriesReadingData{"
464-
+ "uuid="
465-
+ uuid
466-
+ ", columnScheme="
467-
+ columnScheme
468-
+ ", reader="
469-
+ reader
470-
+ '}';
471-
}
472-
}
473-
474408
/** Enhancing the {@link IndividualTimeSeriesMetaInformation} with the full path to csv file */
475409
public static class CsvIndividualTimeSeriesMetaInformation
476410
extends IndividualTimeSeriesMetaInformation {

src/main/java/edu/ie3/datamodel/io/source/csv/CsvWeatherSource.java

Lines changed: 38 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
import edu.ie3.datamodel.utils.TimeSeriesUtil;
2222
import edu.ie3.util.interval.ClosedInterval;
2323
import java.io.BufferedReader;
24+
import java.io.FileNotFoundException;
2425
import java.io.IOException;
2526
import java.time.ZonedDateTime;
2627
import java.util.*;
@@ -95,15 +96,16 @@ public CsvWeatherSource(
9596
* @return a map of coordinates to their time series
9697
*/
9798
private Map<Point, IndividualTimeSeries<WeatherValue>> getWeatherTimeSeries() {
98-
/* Get only weather time series reader */
99-
Map<ColumnScheme, Set<CsvFileConnector.TimeSeriesReadingData>> colTypeToReadingData =
100-
connector.initTimeSeriesReader(ColumnScheme.WEATHER);
99+
/* Get only weather time series meta information */
100+
Map<ColumnScheme, Set<CsvFileConnector.CsvIndividualTimeSeriesMetaInformation>>
101+
colTypeToMetaData =
102+
connector.getCsvIndividualTimeSeriesMetaInformation(ColumnScheme.WEATHER);
101103

102104
/* Reading in weather time series */
103-
Set<CsvFileConnector.TimeSeriesReadingData> weatherReadingData =
104-
colTypeToReadingData.get(ColumnScheme.WEATHER);
105+
Set<CsvFileConnector.CsvIndividualTimeSeriesMetaInformation> weatherCsvMetaInformation =
106+
colTypeToMetaData.get(ColumnScheme.WEATHER);
105107

106-
return readWeatherTimeSeries(weatherReadingData);
108+
return readWeatherTimeSeries(weatherCsvMetaInformation, connector);
107109
}
108110

109111
@Override
@@ -151,35 +153,43 @@ private Map<Point, IndividualTimeSeries<WeatherValue>> trimMapToInterval(
151153
/**
152154
 * Reads weather data to time series and maps them coordinate-wise
153155
*
154-
* @param weatherReadingData Data needed for reading
156+
 * @param weatherMetaInformation Data needed for reading
 * @param connector the connector used to build a reader for each individual time series file
155157
* @return time series mapped to the represented coordinate
156158
*/
157159
private Map<Point, IndividualTimeSeries<WeatherValue>> readWeatherTimeSeries(
158-
Set<CsvFileConnector.TimeSeriesReadingData> weatherReadingData) {
160+
Set<CsvFileConnector.CsvIndividualTimeSeriesMetaInformation> weatherMetaInformation,
161+
CsvFileConnector connector) {
159162
final Map<Point, IndividualTimeSeries<WeatherValue>> weatherTimeSeries = new HashMap<>();
160163
Function<Map<String, String>, Optional<TimeBasedValue<WeatherValue>>> fieldToValueFunction =
161164
this::buildWeatherValue;
162-
163165
/* Reading in weather time series */
164-
for (CsvFileConnector.TimeSeriesReadingData data : weatherReadingData) {
165-
filterEmptyOptionals(
166-
buildStreamWithFieldsToAttributesMap(TimeBasedValue.class, data.getReader())
167-
.map(fieldToValueFunction))
168-
.collect(Collectors.groupingBy(tbv -> tbv.getValue().getCoordinate()))
169-
.forEach(
170-
(point, timeBasedValues) -> {
171-
// We have to generate a random UUID as we'd risk running into duplicate key issues
172-
// otherwise
173-
IndividualTimeSeries<WeatherValue> timeSeries =
174-
new IndividualTimeSeries<>(UUID.randomUUID(), new HashSet<>(timeBasedValues));
175-
if (weatherTimeSeries.containsKey(point)) {
176-
IndividualTimeSeries<WeatherValue> mergedTimeSeries =
177-
mergeTimeSeries(weatherTimeSeries.get(point), timeSeries);
178-
weatherTimeSeries.put(point, mergedTimeSeries);
179-
} else {
180-
weatherTimeSeries.put(point, timeSeries);
181-
}
182-
});
166+
for (CsvFileConnector.CsvIndividualTimeSeriesMetaInformation data : weatherMetaInformation) {
167+
// we need a reader for each file
168+
try (BufferedReader reader = connector.initReader(data.getFullFilePath())) {
169+
filterEmptyOptionals(
170+
buildStreamWithFieldsToAttributesMap(TimeBasedValue.class, reader)
171+
.map(fieldToValueFunction))
172+
.collect(Collectors.groupingBy(tbv -> tbv.getValue().getCoordinate()))
173+
.forEach(
174+
(point, timeBasedValues) -> {
175+
// We have to generate a random UUID as we'd risk running into duplicate key
176+
// issues
177+
// otherwise
178+
IndividualTimeSeries<WeatherValue> timeSeries =
179+
new IndividualTimeSeries<>(UUID.randomUUID(), new HashSet<>(timeBasedValues));
180+
if (weatherTimeSeries.containsKey(point)) {
181+
IndividualTimeSeries<WeatherValue> mergedTimeSeries =
182+
mergeTimeSeries(weatherTimeSeries.get(point), timeSeries);
183+
weatherTimeSeries.put(point, mergedTimeSeries);
184+
} else {
185+
weatherTimeSeries.put(point, timeSeries);
186+
}
187+
});
188+
} catch (FileNotFoundException e) {
189+
log.error("Cannot read file {}. File not found!", data.getFullFilePath());
190+
} catch (IOException e) {
191+
log.error("Cannot read file {}. Exception: {}", data.getFullFilePath(), e);
192+
}
183193
}
184194
return weatherTimeSeries;
185195
}

src/test/groovy/edu/ie3/datamodel/io/connectors/CsvFileConnectorTest.groovy

Lines changed: 6 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -115,39 +115,28 @@ class CsvFileConnectorTest extends Specification {
115115
actual == expected
116116
}
117117

118-
def "The csv file connector returns empty Optional of TimeSeriesReadingData when pointed to non-individual time series"() {
118+
def "The csv file connector returns empty Optional of CsvTimeSeriesMetaInformation when pointed to non-individual time series"() {
119119
given:
120120
def pathString = "lpts_h0_53990eea-1b5d-47e8-9134-6d8de36604bf"
121121

122122
when:
123-
def actual = cfc.buildReadingData(pathString)
123+
def actual = cfc.buildCsvTimeSeriesMetaInformation(pathString)
124124

125125
then:
126126
!actual.present
127127
}
128128

129-
def "The csv file connector returns empty Optional of TimeSeriesReadingData when pointed to non-existing file"() {
130-
given:
131-
def pathString = "its_pq_32f38421-f7fd-4295-8f9a-3a54b4e7dba9"
132-
133-
when:
134-
def actual = cfc.buildReadingData(pathString)
135-
136-
then:
137-
!actual.present
138-
}
139-
140-
def "The csv file connector is able to build correct reading information from valid input"() {
129+
def "The csv file connector is able to build correct meta information from valid input"() {
141130
given:
142131
def pathString = "its_pq_53990eea-1b5d-47e8-9134-6d8de36604bf"
143-
def expected = new CsvFileConnector.TimeSeriesReadingData(
132+
def expected = new CsvFileConnector.CsvIndividualTimeSeriesMetaInformation(
144133
UUID.fromString("53990eea-1b5d-47e8-9134-6d8de36604bf"),
145134
ColumnScheme.APPARENT_POWER,
146-
Mock(BufferedReader)
135+
""
147136
)
148137

149138
when:
150-
def actual = cfc.buildReadingData(pathString)
139+
def actual = cfc.buildCsvTimeSeriesMetaInformation(pathString)
151140

152141
then:
153142
actual.present
@@ -158,17 +147,6 @@ class CsvFileConnectorTest extends Specification {
158147
}
159148
}
160149

161-
def "The csv file connector is able to init readers for all time series files"() {
162-
when:
163-
def actual = cfc.initTimeSeriesReader()
164-
165-
then:
166-
actual.size() == 5
167-
def energyPriceEntries = actual.get(ColumnScheme.ENERGY_PRICE)
168-
Objects.nonNull(energyPriceEntries)
169-
energyPriceEntries.size() == 2
170-
}
171-
172150
def "The csv file connector throws an Exception, if the foreseen file cannot be found"() {
173151
given:
174152
def cfc = new CsvFileConnector(tmpDirectory.toString(), new HierarchicFileNamingStrategy(new DefaultDirectoryHierarchy(tmpDirectory.toString(), "test")))

0 commit comments

Comments
 (0)